1 /* $NetBSD: pq3etsec.c,v 1.28 2016/07/26 01:36:50 nonaka Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "opt_inet.h" 38 #include "opt_mpc85xx.h" 39 #include "opt_multiprocessor.h" 40 #include "opt_net_mpsafe.h" 41 42 #include <sys/cdefs.h> 43 44 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.28 2016/07/26 01:36:50 nonaka Exp $"); 45 46 #include <sys/param.h> 47 #include <sys/cpu.h> 48 #include <sys/device.h> 49 #include <sys/mbuf.h> 50 #include <sys/ioctl.h> 51 #include <sys/intr.h> 52 #include <sys/bus.h> 53 #include <sys/kernel.h> 54 #include <sys/kmem.h> 55 #include <sys/proc.h> 56 #include <sys/atomic.h> 57 #include <sys/callout.h> 58 #include <sys/sysctl.h> 59 60 #include <net/if.h> 61 #include <net/if_dl.h> 62 #include <net/if_ether.h> 63 #include <net/if_media.h> 64 65 #include <dev/mii/miivar.h> 66 67 #include <net/bpf.h> 68 69 #ifdef INET 70 #include <netinet/in.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/in_offload.h> 74 #endif /* INET */ 75 #ifdef INET6 76 #include <netinet6/in6.h> 77 #include <netinet/ip6.h> 78 #endif 79 #include <netinet6/in6_offload.h> 80 81 #include <powerpc/spr.h> 82 #include <powerpc/booke/spr.h> 83 84 #include <powerpc/booke/cpuvar.h> 85 #include <powerpc/booke/e500var.h> 86 #include <powerpc/booke/e500reg.h> 87 #include <powerpc/booke/etsecreg.h> 88 89 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 90 91 #define ETSEC_MAXTXMBUFS 30 92 #define ETSEC_NTXSEGS 30 93 #define ETSEC_MAXRXMBUFS 511 94 #define ETSEC_MINRXMBUFS 32 95 #define ETSEC_NRXSEGS 1 96 97 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 98 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\ 99 |IFCAP_CSUM_UDPv4_Rx\ 100 |IFCAP_CSUM_TCPv6_Rx\ 101 |IFCAP_CSUM_UDPv6_Rx) 102 103 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 104 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\ 105 |IFCAP_CSUM_UDPv4_Tx\ 106 |IFCAP_CSUM_TCPv6_Tx\ 107 |IFCAP_CSUM_UDPv6_Tx) 108 109 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\ 110 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN) 111 112 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU) 113 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6) 114 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 115 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6) 116 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4) 117 #define M_CSUM_CIP (M_CSUM_IPv4) 118 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 119 120 struct pq3etsec_txqueue { 121 bus_dmamap_t txq_descmap; 122 volatile struct txbd *txq_consumer; 123 volatile struct txbd *txq_producer; 124 volatile struct txbd *txq_first; 125 volatile struct txbd *txq_last; 126 struct ifqueue txq_mbufs; 127 struct mbuf *txq_next; 128 #ifdef ETSEC_DEBUG 129 struct mbuf *txq_lmbufs[512]; 130 #endif 131 uint32_t txq_qmask; 132 uint32_t txq_free; 133 uint32_t txq_threshold; 134 uint32_t txq_lastintr; 135 bus_size_t txq_reg_tbase; 136 bus_dma_segment_t txq_descmap_seg; 137 }; 138 139 struct pq3etsec_rxqueue { 140 bus_dmamap_t rxq_descmap; 141 volatile struct rxbd *rxq_consumer; 142 volatile struct rxbd *rxq_producer; 143 volatile struct rxbd *rxq_first; 144 volatile struct rxbd *rxq_last; 145 struct mbuf *rxq_mhead; 146 struct mbuf **rxq_mtail; 147 struct mbuf *rxq_mconsumer; 148 #ifdef ETSEC_DEBUG 149 struct mbuf *rxq_mbufs[512]; 150 #endif 151 uint32_t rxq_qmask; 152 uint32_t rxq_inuse; 153 uint32_t rxq_threshold; 154 bus_size_t rxq_reg_rbase; 155 bus_size_t rxq_reg_rbptr; 156 bus_dma_segment_t rxq_descmap_seg; 157 }; 158 159 struct pq3etsec_mapcache { 160 u_int dmc_nmaps; 161 u_int dmc_maxseg; 162 u_int dmc_maxmaps; 163 u_int 
dmc_maxmapsize; 164 bus_dmamap_t dmc_maps[0]; 165 }; 166 167 struct pq3etsec_softc { 168 device_t sc_dev; 169 device_t sc_mdio_dev; 170 struct ethercom sc_ec; 171 #define sc_if sc_ec.ec_if 172 struct mii_data sc_mii; 173 bus_space_tag_t sc_bst; 174 bus_space_handle_t sc_bsh; 175 bus_space_handle_t sc_mdio_bsh; 176 bus_dma_tag_t sc_dmat; 177 int sc_phy_addr; 178 prop_dictionary_t sc_intrmap; 179 uint32_t sc_intrmask; 180 181 uint32_t sc_soft_flags; 182 #define SOFT_RESET 0x0001 183 #define SOFT_RXINTR 0x0010 184 #define SOFT_RXBSY 0x0020 185 #define SOFT_TXINTR 0x0100 186 #define SOFT_TXERROR 0x0200 187 188 struct pq3etsec_txqueue sc_txq; 189 struct pq3etsec_rxqueue sc_rxq; 190 uint32_t sc_txerrors; 191 uint32_t sc_rxerrors; 192 193 size_t sc_rx_adjlen; 194 195 /* 196 * Copies of various ETSEC registers. 197 */ 198 uint32_t sc_imask; 199 uint32_t sc_maccfg1; 200 uint32_t sc_maccfg2; 201 uint32_t sc_maxfrm; 202 uint32_t sc_ecntrl; 203 uint32_t sc_dmactrl; 204 uint32_t sc_macstnaddr1; 205 uint32_t sc_macstnaddr2; 206 uint32_t sc_tctrl; 207 uint32_t sc_rctrl; 208 uint32_t sc_gaddr[16]; 209 uint64_t sc_macaddrs[15]; 210 211 void *sc_tx_ih; 212 void *sc_rx_ih; 213 void *sc_error_ih; 214 void *sc_soft_ih; 215 216 kmutex_t *sc_lock; 217 kmutex_t *sc_hwlock; 218 219 struct evcnt sc_ev_tx_stall; 220 struct evcnt sc_ev_tx_intr; 221 struct evcnt sc_ev_rx_stall; 222 struct evcnt sc_ev_rx_intr; 223 struct evcnt sc_ev_error_intr; 224 struct evcnt sc_ev_soft_intr; 225 struct evcnt sc_ev_tx_pause; 226 struct evcnt sc_ev_rx_pause; 227 struct evcnt sc_ev_mii_ticks; 228 229 struct callout sc_mii_callout; 230 uint64_t sc_mii_last_tick; 231 232 struct ifqueue sc_rx_bufcache; 233 struct pq3etsec_mapcache *sc_rx_mapcache; 234 struct pq3etsec_mapcache *sc_tx_mapcache; 235 236 /* Interrupt Coalescing parameters */ 237 int sc_ic_rx_time; 238 int sc_ic_rx_count; 239 int sc_ic_tx_time; 240 int sc_ic_tx_count; 241 }; 242 243 #define ETSEC_IC_RX_ENABLED(sc) \ 244 ((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0) 245 #define ETSEC_IC_TX_ENABLED(sc) \ 246 ((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0) 247 248 struct pq3mdio_softc { 249 device_t mdio_dev; 250 251 kmutex_t *mdio_lock; 252 253 bus_space_tag_t mdio_bst; 254 bus_space_handle_t mdio_bsh; 255 }; 256 257 static int pq3etsec_match(device_t, cfdata_t, void *); 258 static void pq3etsec_attach(device_t, device_t, void *); 259 260 static int pq3mdio_match(device_t, cfdata_t, void *); 261 static void pq3mdio_attach(device_t, device_t, void *); 262 263 static void pq3etsec_ifstart(struct ifnet *); 264 static void pq3etsec_ifwatchdog(struct ifnet *); 265 static int pq3etsec_ifinit(struct ifnet *); 266 static void pq3etsec_ifstop(struct ifnet *, int); 267 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 268 269 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 270 struct pq3etsec_mapcache **, size_t, size_t, size_t); 271 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 272 struct pq3etsec_mapcache *); 273 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 274 struct pq3etsec_mapcache *); 275 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 276 struct pq3etsec_mapcache *, bus_dmamap_t); 277 278 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 279 struct pq3etsec_txqueue *, u_int); 280 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 281 struct pq3etsec_txqueue *); 282 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 283 struct pq3etsec_txqueue *); 284 static bool 
pq3etsec_txq_consume(struct pq3etsec_softc *, 285 struct pq3etsec_txqueue *); 286 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 287 struct pq3etsec_txqueue *, struct mbuf *m); 288 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 289 struct pq3etsec_txqueue *); 290 291 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 292 struct pq3etsec_rxqueue *, u_int); 293 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 294 struct pq3etsec_rxqueue *); 295 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 296 struct pq3etsec_rxqueue *, bool); 297 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 298 struct pq3etsec_rxqueue *); 299 300 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 301 302 static void pq3etsec_mii_tick(void *); 303 static int pq3etsec_rx_intr(void *); 304 static int pq3etsec_tx_intr(void *); 305 static int pq3etsec_error_intr(void *); 306 static void pq3etsec_soft_intr(void *); 307 308 static void pq3etsec_set_ic_rx(struct pq3etsec_softc *); 309 static void pq3etsec_set_ic_tx(struct pq3etsec_softc *); 310 311 static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *); 312 313 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 314 pq3etsec_match, pq3etsec_attach, NULL, NULL); 315 316 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc), 317 pq3mdio_match, pq3mdio_attach, NULL, NULL); 318 319 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc), 320 pq3mdio_match, pq3mdio_attach, NULL, NULL); 321 322 static inline uint32_t 323 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off) 324 { 325 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off); 326 } 327 328 static inline void 329 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data) 330 { 331 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data); 332 } 333 334 static inline uint32_t 335 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 336 { 337 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 338 } 339 340 static int 341 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 342 { 343 return strcmp(cf->cf_name, "mdio") == 0; 344 } 345 346 static int 347 pq3mdio_match(device_t parent, cfdata_t cf, void *aux) 348 { 349 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 350 const bool p1025_p = (svr == (SVR_P1025v1 >> 16) 351 || svr == (SVR_P1016v1 >> 16)); 352 353 if (device_is_a(parent, "cpunode")) { 354 if (!p1025_p 355 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 356 return 0; 357 358 return 1; 359 } 360 361 if (device_is_a(parent, "tsec")) { 362 if (p1025_p 363 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 364 return 0; 365 366 return 1; 367 } 368 369 return 0; 370 } 371 372 static void 373 pq3mdio_attach(device_t parent, device_t self, void *aux) 374 { 375 struct pq3mdio_softc * const mdio = device_private(self); 376 struct cpunode_attach_args * const cna = aux; 377 struct cpunode_locators * const cnl = &cna->cna_locs; 378 379 mdio->mdio_dev = self; 380 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 381 382 if (device_is_a(parent, "cpunode")) { 383 struct cpunode_softc * const psc = device_private(parent); 384 psc->sc_children |= cna->cna_childmask; 385 386 mdio->mdio_bst = cna->cna_memt; 387 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr, 388 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) { 389 aprint_error(": error mapping registers @ %#x\n", 390 cnl->cnl_addr); 391 return; 392 } 393 } else { 394 struct pq3etsec_softc * 
const sc = device_private(parent); 395 396 KASSERT(device_is_a(parent, "tsec")); 397 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE 398 || cnl->cnl_addr == ETSEC2_BASE 399 || cnl->cnl_addr == ETSEC3_BASE 400 || cnl->cnl_addr == ETSEC4_BASE, 401 "unknown tsec addr %x", cnl->cnl_addr); 402 403 mdio->mdio_bst = sc->sc_bst; 404 mdio->mdio_bsh = sc->sc_bsh; 405 } 406 407 aprint_normal("\n"); 408 } 409 410 static int 411 pq3mdio_mii_readreg(device_t self, int phy, int reg) 412 { 413 struct pq3mdio_softc * const mdio = device_private(self); 414 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 415 416 mutex_enter(mdio->mdio_lock); 417 418 etsec_mdio_write(mdio, MIIMADD, 419 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 420 421 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 422 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ); 423 424 while (etsec_mdio_read(mdio, MIIMIND) != 0) { 425 delay(1); 426 } 427 int data = etsec_mdio_read(mdio, MIIMSTAT); 428 429 if (miimcom == MIIMCOM_SCAN) 430 etsec_mdio_write(mdio, MIIMCOM, miimcom); 431 432 #if 0 433 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 434 __func__, phy, reg, data); 435 #endif 436 mutex_exit(mdio->mdio_lock); 437 return data; 438 } 439 440 static void 441 pq3mdio_mii_writereg(device_t self, int phy, int reg, int data) 442 { 443 struct pq3mdio_softc * const mdio = device_private(self); 444 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 445 446 #if 0 447 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 448 __func__, phy, reg, data); 449 #endif 450 451 mutex_enter(mdio->mdio_lock); 452 453 etsec_mdio_write(mdio, MIIMADD, 454 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 455 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 456 etsec_mdio_write(mdio, MIIMCON, data); 457 458 int timo = 1000; /* 1ms */ 459 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 460 delay(1); 461 } 462 463 if (miimcom == MIIMCOM_SCAN) 464 etsec_mdio_write(mdio, MIIMCOM, miimcom); 465 466 mutex_exit(mdio->mdio_lock); 467 } 468 469 static inline void 470 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 471 { 472 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 473 } 474 475 static void 476 pq3etsec_mii_statchg(struct ifnet *ifp) 477 { 478 struct pq3etsec_softc * const sc = ifp->if_softc; 479 struct mii_data * const mii = &sc->sc_mii; 480 481 uint32_t maccfg1 = sc->sc_maccfg1; 482 uint32_t maccfg2 = sc->sc_maccfg2; 483 uint32_t ecntrl = sc->sc_ecntrl; 484 485 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW); 486 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD); 487 488 if (sc->sc_mii.mii_media_active & IFM_FDX) { 489 maccfg2 |= MACCFG2_FD; 490 } 491 492 /* 493 * Now deal with the flow control bits. 494 */ 495 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 496 && (mii->mii_media_active & IFM_ETH_FMASK)) { 497 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 498 maccfg1 |= MACCFG1_RX_FLOW; 499 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 500 maccfg1 |= MACCFG1_TX_FLOW; 501 } 502 503 /* 504 * Now deal with the speed. 505 */ 506 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 507 maccfg2 |= MACCFG2_IFMODE_GMII; 508 } else { 509 maccfg2 |= MACCFG2_IFMODE_MII; 510 ecntrl &= ~ECNTRL_R100M; 511 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 512 ecntrl |= ECNTRL_R100M; 513 } 514 } 515 516 /* 517 * If things are different, re-init things. 
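	 * Note that the hardware is not reprogrammed from this MII
	 * status-change callback; the new register values are only recorded
	 * below and, if the interface is running, SOFT_RESET is set so the
	 * reinitialization can happen later outside this context.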
518 */ 519 if (maccfg1 != sc->sc_maccfg1 520 || maccfg2 != sc->sc_maccfg2 521 || ecntrl != sc->sc_ecntrl) { 522 if (sc->sc_if.if_flags & IFF_RUNNING) 523 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 524 sc->sc_maccfg1 = maccfg1; 525 sc->sc_maccfg2 = maccfg2; 526 sc->sc_ecntrl = ecntrl; 527 } 528 } 529 530 #if 0 531 static void 532 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 533 { 534 struct pq3etsec_softc * const sc = ifp->if_softc; 535 536 mii_pollstat(&sc->sc_mii); 537 ether_mediastatus(ifp, ifmr); 538 ifmr->ifm_status = sc->sc_mii.mii_media_status; 539 ifmr->ifm_active = sc->sc_mii.mii_media_active; 540 } 541 542 static int 543 pq3etsec_mediachange(struct ifnet *ifp) 544 { 545 struct pq3etsec_softc * const sc = ifp->if_softc; 546 547 if ((ifp->if_flags & IFF_UP) == 0) 548 return 0; 549 550 int rv = mii_mediachg(&sc->sc_mii); 551 return (rv == ENXIO) ? 0 : rv; 552 } 553 #endif 554 555 static int 556 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 557 { 558 559 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 560 return 0; 561 562 return 1; 563 } 564 565 static void 566 pq3etsec_attach(device_t parent, device_t self, void *aux) 567 { 568 struct cpunode_softc * const psc = device_private(parent); 569 struct pq3etsec_softc * const sc = device_private(self); 570 struct cpunode_attach_args * const cna = aux; 571 struct cpunode_locators * const cnl = &cna->cna_locs; 572 cfdata_t cf = device_cfdata(self); 573 int error; 574 575 psc->sc_children |= cna->cna_childmask; 576 sc->sc_dev = self; 577 sc->sc_bst = cna->cna_memt; 578 sc->sc_dmat = &booke_bus_dma_tag; 579 580 /* 581 * Pull out the mdio bus and phy we are supposed to use. 582 */ 583 const int mdio = cf->cf_loc[CPUNODECF_MDIO]; 584 const int phy = cf->cf_loc[CPUNODECF_PHY]; 585 if (mdio != CPUNODECF_MDIO_DEFAULT) 586 aprint_normal(" mdio %d", mdio); 587 588 /* 589 * See if the phy is in the config file... 590 */ 591 if (phy != CPUNODECF_PHY_DEFAULT) { 592 sc->sc_phy_addr = phy; 593 } else { 594 unsigned char prop_name[20]; 595 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 596 cnl->cnl_instance); 597 sc->sc_phy_addr = board_info_get_number(prop_name); 598 } 599 if (sc->sc_phy_addr != MII_PHY_ANY) 600 aprint_normal(" phy %d", sc->sc_phy_addr); 601 602 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 603 &sc->sc_bsh); 604 if (error) { 605 aprint_error(": error mapping registers: %d\n", error); 606 return; 607 } 608 609 /* 610 * Assume firmware has aready set the mac address and fetch it 611 * before we reinit it. 
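	 * (The station address lives in MACSTNADDR1/2 in reversed byte
	 * order: MACSTNADDR1 holds octets 5..2 with octet 5 in its most
	 * significant byte, and the upper half of MACSTNADDR2 holds octets
	 * 1..0 -- which is how enaddr[] is decoded further down.)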
612 */ 613 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 614 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 615 sc->sc_rctrl = RCTRL_DEFAULT; 616 sc->sc_ecntrl = etsec_read(sc, ECNTRL); 617 sc->sc_maccfg1 = etsec_read(sc, MACCFG1); 618 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT; 619 620 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 621 size_t len; 622 const uint8_t *mac_addr = 623 board_info_get_data("tsec-mac-addr-base", &len); 624 KASSERT(len == ETHER_ADDR_LEN); 625 sc->sc_macstnaddr2 = 626 (mac_addr[1] << 24) 627 | (mac_addr[0] << 16); 628 sc->sc_macstnaddr1 = 629 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 630 | (mac_addr[4] << 16) 631 | (mac_addr[3] << 8) 632 | (mac_addr[2] << 0); 633 #if 0 634 aprint_error(": mac-address unknown\n"); 635 return; 636 #endif 637 } 638 639 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 640 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 641 642 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 643 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 644 645 /* Disable interrupts */ 646 etsec_write(sc, IMASK, 0); 647 648 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 649 if (error) { 650 aprint_error(": failed to init rxq: %d\n", error); 651 return; 652 } 653 654 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 655 if (error) { 656 aprint_error(": failed to init txq: %d\n", error); 657 return; 658 } 659 660 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 661 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 662 if (error) { 663 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 664 return; 665 } 666 667 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 668 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 669 if (error) { 670 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 671 return; 672 } 673 674 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 675 pq3etsec_tx_intr, sc); 676 if (sc->sc_tx_ih == NULL) { 677 aprint_error(": failed to establish tx interrupt: %d\n", 678 cnl->cnl_intrs[0]); 679 return; 680 } 681 682 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 683 pq3etsec_rx_intr, sc); 684 if (sc->sc_rx_ih == NULL) { 685 aprint_error(": failed to establish rx interrupt: %d\n", 686 cnl->cnl_intrs[1]); 687 return; 688 } 689 690 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 691 pq3etsec_error_intr, sc); 692 if (sc->sc_error_ih == NULL) { 693 aprint_error(": failed to establish error interrupt: %d\n", 694 cnl->cnl_intrs[2]); 695 return; 696 } 697 698 int softint_flags = SOFTINT_NET; 699 #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE) 700 softint_flags |= SOFTINT_MPSAFE; 701 #endif /* !MULTIPROCESSOR || NET_MPSAFE */ 702 sc->sc_soft_ih = softint_establish(softint_flags, 703 pq3etsec_soft_intr, sc); 704 if (sc->sc_soft_ih == NULL) { 705 aprint_error(": failed to establish soft interrupt\n"); 706 return; 707 } 708 709 /* 710 * If there was no MDIO 711 */ 712 if (mdio == CPUNODECF_MDIO_DEFAULT) { 713 aprint_normal("\n"); 714 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna); 715 if (mdio_cf != NULL) { 716 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL); 717 } 718 } else { 719 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio); 720 if (sc->sc_mdio_dev == NULL) { 721 aprint_error(": failed to locate mdio device\n"); 722 return; 723 } 724 aprint_normal("\n"); 725 } 726 727 etsec_write(sc, ATTR, ATTR_DEFAULT); 728 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 729 730 
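	/*
	 * Default interrupt coalescing parameters.  Both the count and the
	 * time threshold must be non-zero for coalescing to be considered
	 * enabled (see ETSEC_IC_RX_ENABLED/ETSEC_IC_TX_ENABLED).  When it is
	 * enabled, every transmit descriptor chain requests an interrupt
	 * (TXBD_I) and the hardware is left to coalesce them; otherwise
	 * pq3etsec_txq_produce() only sets TXBD_I every txq_threshold
	 * descriptors or when the queue is about to fill.  The time values
	 * are interpreted by pq3etsec_set_ic_rx()/pq3etsec_set_ic_tx().
	 */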
	/* Enable interrupt coalescing */
	sc->sc_ic_rx_time = 768;
	sc->sc_ic_rx_count = 16;
	sc->sc_ic_tx_time = 768;
	sc->sc_ic_tx_count = 16;
	pq3etsec_set_ic_rx(sc);
	pq3etsec_set_ic_tx(sc);
	pq3etsec_sysctl_setup(NULL, sc);

	char enaddr[ETHER_ADDR_LEN] = {
	    [0] = sc->sc_macstnaddr2 >> 16,
	    [1] = sc->sc_macstnaddr2 >> 24,
	    [2] = sc->sc_macstnaddr1 >> 0,
	    [3] = sc->sc_macstnaddr1 >> 8,
	    [4] = sc->sc_macstnaddr1 >> 16,
	    [5] = sc->sc_macstnaddr1 >> 24,
	};
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	const char * const xname = device_xname(sc->sc_dev);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;

	ec->ec_mii = &sc->sc_mii;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pq3mdio_mii_readreg;
	sc->sc_mii.mii_writereg = pq3mdio_mii_writereg;
	sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
		mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff,
		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			callout_schedule(&sc->sc_mii_callout, hz);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else {
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_capabilities = IFCAP_ETSEC;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pq3etsec_ifioctl;
	ifp->if_start = pq3etsec_ifstart;
	ifp->if_watchdog = pq3etsec_ifwatchdog;
	ifp->if_init = pq3etsec_ifinit;
	ifp->if_stop = pq3etsec_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_initialize(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	pq3etsec_ifstop(ifp, true);

	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "rx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "tx intr");
	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "rx intr");
	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "error intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "tx pause");
	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "rx pause");
	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
	    NULL, xname, "mii ticks");
}

static uint64_t
pq3etsec_macaddr_create(const uint8_t *lladdr)
{
	uint64_t macaddr = 0;

	lladdr += ETHER_ADDR_LEN;
	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
		macaddr = (macaddr << 8) | *--lladdr;
	}
	return macaddr << 16;
}

static int
pq3etsec_ifinit(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguration Procedure).
	 */
	pq3etsec_ifstop(ifp, 0);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
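	 * (The transmit maps were created with sc_maxfrm as their maximum
	 * size, so a changed MTU invalidates the cached maps.)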
856 */ 857 if (sc->sc_tx_mapcache != NULL 858 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 859 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 860 sc->sc_tx_mapcache = NULL; 861 } 862 863 if (sc->sc_tx_mapcache == NULL) { 864 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 865 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 866 if (error) 867 return error; 868 } 869 870 sc->sc_ev_mii_ticks.ev_count++; 871 mii_tick(&sc->sc_mii); 872 873 if (ifp->if_flags & IFF_PROMISC) { 874 sc->sc_rctrl |= RCTRL_PROM; 875 } else { 876 sc->sc_rctrl &= ~RCTRL_PROM; 877 } 878 879 uint32_t rctrl_prsdep = 0; 880 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP); 881 if (VLAN_ATTACHED(&sc->sc_ec)) { 882 sc->sc_rctrl |= RCTRL_VLEX; 883 rctrl_prsdep = RCTRL_PRSDEP_L2; 884 } 885 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 886 sc->sc_rctrl |= RCTRL_IPCSEN; 887 rctrl_prsdep = RCTRL_PRSDEP_L3; 888 } 889 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 890 sc->sc_rctrl |= RCTRL_TUCSEN; 891 rctrl_prsdep = RCTRL_PRSDEP_L4; 892 } 893 sc->sc_rctrl |= rctrl_prsdep; 894 #if 0 895 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP)) 896 aprint_normal_dev(sc->sc_dev, 897 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 898 sc->sc_rctrl, 899 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 900 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 901 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 902 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 903 #endif 904 905 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS); 906 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 907 sc->sc_tctrl |= TCTRL_VLINS; 908 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 909 sc->sc_tctrl |= TCTRL_IPCSEN; 910 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 911 sc->sc_tctrl |= TCTRL_TUCSEN; 912 #if 0 913 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS)) 914 aprint_normal_dev(sc->sc_dev, 915 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 916 sc->sc_tctrl, 917 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 918 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 919 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 920 #endif 921 922 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN); 923 924 const uint64_t macstnaddr = 925 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 926 927 sc->sc_imask = IEVENT_DPE; 928 929 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 930 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 931 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 932 933 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 934 pq3etsec_txq_reset(sc, &sc->sc_txq); 935 936 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 937 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 938 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 939 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 940 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 941 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 942 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 943 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 944 945 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 946 pq3etsec_mc_setup(sc); 947 948 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 949 etsec_write(sc, MRBLR, MCLBYTES); 950 951 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 952 sc->sc_dmactrl |= DMACTRL_DEFAULT; 953 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 954 955 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. 
*/ 956 etsec_write(sc, TQUEUE, TQUEUE_EN0); 957 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC; 958 959 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 960 961 /* 12. Enable receive queues in RQUEUE, */ 962 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0); 963 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC; 964 965 /* and optionally set TOE functionality in RCTRL. */ 966 etsec_write(sc, RCTRL, sc->sc_rctrl); 967 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 968 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 969 sc->sc_rx_adjlen += sizeof(struct rxfcb); 970 971 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 972 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 973 974 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 975 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 976 977 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 978 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS); 979 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 980 981 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 982 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 983 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 984 985 sc->sc_soft_flags = 0; 986 987 etsec_write(sc, IMASK, sc->sc_imask); 988 989 ifp->if_flags |= IFF_RUNNING; 990 991 return error; 992 } 993 994 static void 995 pq3etsec_ifstop(struct ifnet *ifp, int disable) 996 { 997 struct pq3etsec_softc * const sc = ifp->if_softc; 998 999 KASSERT(!cpu_intr_p()); 1000 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC; 1001 /* 1002 * Clear the GTSC and GRSC from the interrupt mask until 1003 * we are ready for them. Then clear them from IEVENT, 1004 * request the graceful shutdown, and then enable the 1005 * GTSC and GRSC bits in the mask. This should cause the 1006 * error interrupt to fire which will issue a wakeup to 1007 * allow us to resume. 1008 */ 1009 1010 /* 1011 * 1. Set GRS/GTS bits in DMACTRL register 1012 */ 1013 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS; 1014 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 1015 etsec_write(sc, IEVENT, imask_gsc_mask); 1016 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1017 1018 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) { 1019 /* 1020 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 1021 */ 1022 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 1023 1024 u_int timo = 1000; 1025 uint32_t ievent = etsec_read(sc, IEVENT); 1026 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 1027 if (--timo == 0) { 1028 aprint_error_dev(sc->sc_dev, 1029 "WARNING: " 1030 "request to stop failed (IEVENT=%#x)\n", 1031 ievent); 1032 break; 1033 } 1034 delay(10); 1035 ievent = etsec_read(sc, IEVENT); 1036 } 1037 } 1038 1039 /* 1040 * Now reset the controller. 1041 * 1042 * 3. Set SOFT_RESET bit in MACCFG1 register 1043 * 4. Clear SOFT_RESET bit in MACCFG1 register 1044 */ 1045 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 1046 etsec_write(sc, MACCFG1, 0); 1047 etsec_write(sc, IMASK, 0); 1048 etsec_write(sc, IEVENT, ~0); 1049 sc->sc_imask = 0; 1050 ifp->if_flags &= ~IFF_RUNNING; 1051 1052 uint32_t tbipa = etsec_read(sc, TBIPA); 1053 if (tbipa == sc->sc_phy_addr) { 1054 aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 1055 etsec_write(sc, TBIPA, 0x1f); 1056 } 1057 uint32_t miimcfg = etsec_read(sc, MIIMCFG); 1058 etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 1059 etsec_write(sc, MIIMCFG, miimcfg); 1060 1061 /* 1062 * Let's consume any remaing transmitted packets. 
And if we are 1063 * disabling the interface, purge ourselves of any untransmitted 1064 * packets. But don't consume any received packets, just drop them. 1065 * If we aren't disabling the interface, save the mbufs in the 1066 * receive queue for reuse. 1067 */ 1068 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1069 pq3etsec_txq_consume(sc, &sc->sc_txq); 1070 if (disable) { 1071 pq3etsec_txq_purge(sc, &sc->sc_txq); 1072 IFQ_PURGE(&ifp->if_snd); 1073 } 1074 } 1075 1076 static void 1077 pq3etsec_ifwatchdog(struct ifnet *ifp) 1078 { 1079 } 1080 1081 static void 1082 pq3etsec_mc_setup( 1083 struct pq3etsec_softc *sc) 1084 { 1085 struct ethercom * const ec = &sc->sc_ec; 1086 struct ifnet * const ifp = &sc->sc_if; 1087 struct ether_multi *enm; 1088 struct ether_multistep step; 1089 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1090 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1091 1092 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1093 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1094 1095 ifp->if_flags &= ~IFF_ALLMULTI; 1096 1097 ETHER_FIRST_MULTI(step, ec, enm); 1098 for (u_int i = 0; enm != NULL; ) { 1099 const char *addr = enm->enm_addrlo; 1100 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1101 ifp->if_flags |= IFF_ALLMULTI; 1102 memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1103 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1104 break; 1105 } 1106 if ((sc->sc_rctrl & RCTRL_EMEN) 1107 && i < __arraycount(sc->sc_macaddrs)) { 1108 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1109 } else { 1110 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1111 #if 0 1112 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1113 ether_sprintf(addr), crc, 1114 crc >> crc_shift, 1115 crc >> (crc_shift + 5), 1116 (crc >> crc_shift) & 31, 1117 1 << (((crc >> crc_shift) & 31) ^ 31)); 1118 #endif 1119 /* 1120 * The documentation doesn't completely follow PowerPC 1121 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1122 * is 0x7fa32d9b. By empirical testing, the 1123 * corresponding hash bit is word 3, bit 31 (ppc bit 1124 * order). Since 3 << 31 | 31 is 0x7f, we deduce 1125 * H[0:2] selects the register while H[3:7] selects 1126 * the bit (ppc bit order). 
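			 * For example, with the 256-bin table (GHTX clear,
			 * crc_shift == 24), 0x7fa32d9b >> 24 == 0x7f == 127,
			 * which selects word 127 / 32 == 3 and C bit
			 * (127 & 31) ^ 31 == 0, i.e. ppc bit 31 of word 3,
			 * matching the observation above.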
1127 */ 1128 crc >>= crc_shift; 1129 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 1130 } 1131 ETHER_NEXT_MULTI(step, enm); 1132 } 1133 for (u_int i = 0; i < 8; i++) { 1134 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 1135 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 1136 #if 0 1137 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 1138 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 1139 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 1140 i, GADDR(i), etsec_read(sc, GADDR(i))); 1141 #endif 1142 } 1143 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 1144 uint64_t macaddr = sc->sc_macaddrs[i]; 1145 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1146 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1147 #if 0 1148 if (macaddr) 1149 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1150 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1151 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1152 #endif 1153 } 1154 } 1155 1156 static int 1157 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1158 { 1159 struct pq3etsec_softc *sc = ifp->if_softc; 1160 struct ifreq * const ifr = data; 1161 const int s = splnet(); 1162 int error; 1163 1164 switch (cmd) { 1165 case SIOCSIFMEDIA: 1166 case SIOCGIFMEDIA: 1167 /* Flow control requires full-duplex mode. */ 1168 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1169 (ifr->ifr_media & IFM_FDX) == 0) 1170 ifr->ifr_media &= ~IFM_ETH_FMASK; 1171 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1172 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1173 /* We can do both TXPAUSE and RXPAUSE. */ 1174 ifr->ifr_media |= 1175 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1176 } 1177 } 1178 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1179 break; 1180 1181 default: 1182 error = ether_ioctl(ifp, cmd, data); 1183 if (error != ENETRESET) 1184 break; 1185 1186 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1187 error = 0; 1188 if (ifp->if_flags & IFF_RUNNING) 1189 pq3etsec_mc_setup(sc); 1190 break; 1191 } 1192 error = pq3etsec_ifinit(ifp); 1193 break; 1194 } 1195 1196 splx(s); 1197 return error; 1198 } 1199 1200 static void 1201 pq3etsec_rxq_desc_presync( 1202 struct pq3etsec_softc *sc, 1203 struct pq3etsec_rxqueue *rxq, 1204 volatile struct rxbd *rxbd, 1205 size_t count) 1206 { 1207 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1208 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1209 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1210 } 1211 1212 static void 1213 pq3etsec_rxq_desc_postsync( 1214 struct pq3etsec_softc *sc, 1215 struct pq3etsec_rxqueue *rxq, 1216 volatile struct rxbd *rxbd, 1217 size_t count) 1218 { 1219 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1220 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1221 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1222 } 1223 1224 static void 1225 pq3etsec_txq_desc_presync( 1226 struct pq3etsec_softc *sc, 1227 struct pq3etsec_txqueue *txq, 1228 volatile struct txbd *txbd, 1229 size_t count) 1230 { 1231 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1232 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1233 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1234 } 1235 1236 static void 1237 pq3etsec_txq_desc_postsync( 1238 struct pq3etsec_softc *sc, 1239 struct pq3etsec_txqueue *txq, 1240 volatile struct txbd *txbd, 1241 size_t count) 1242 { 1243 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1244 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1245 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1246 } 
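/*
 * DMA map cache: dmc_maps[] is used as a simple LIFO stack of preallocated
 * bus_dma maps.  pq3etsec_mapcache_create() fills the stack (dmc_nmaps ==
 * dmc_maxmaps), _get() pops a map and _put() pushes it back; the helpers
 * themselves do no locking.  A minimal usage sketch follows (illustrative
 * only, not part of the driver):
 */
#if 0
	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map != NULL) {
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		/* ... use the map ... */
		bus_dmamap_unload(sc->sc_dmat, map);
		pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
	}
#endif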
static bus_dmamap_t
pq3etsec_mapcache_get(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
pq3etsec_mapcache_put(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
pq3etsec_mapcache_destroy(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
pq3etsec_mapcache_create(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
	struct pq3etsec_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
pq3etsec_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
pq3etsec_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
	    BUS_DMA_COHERENT);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
return NULL; 1389 } 1390 MCLGET(m, M_DONTWAIT); 1391 if ((m->m_flags & M_EXT) == 0) { 1392 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 1393 m_freem(m); 1394 return NULL; 1395 } 1396 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1397 1398 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache); 1399 if (map == NULL) { 1400 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 1401 m_freem(m); 1402 return NULL; 1403 } 1404 M_SETCTX(m, map); 1405 m->m_len = m->m_pkthdr.len = MCLBYTES; 1406 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1407 BUS_DMA_READ|BUS_DMA_NOWAIT); 1408 if (error) { 1409 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n", 1410 error); 1411 M_SETCTX(m, NULL); 1412 m_freem(m); 1413 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1414 return NULL; 1415 } 1416 KASSERT(map->dm_mapsize == MCLBYTES); 1417 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1418 BUS_DMASYNC_PREREAD); 1419 1420 return m; 1421 } 1422 1423 static void 1424 pq3etsec_rx_map_unload( 1425 struct pq3etsec_softc *sc, 1426 struct mbuf *m) 1427 { 1428 KASSERT(m); 1429 for (; m != NULL; m = m->m_next) { 1430 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1431 KASSERT(map); 1432 KASSERT(map->dm_mapsize == MCLBYTES); 1433 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 1434 BUS_DMASYNC_POSTREAD); 1435 bus_dmamap_unload(sc->sc_dmat, map); 1436 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1437 M_SETCTX(m, NULL); 1438 } 1439 } 1440 1441 static bool 1442 pq3etsec_rxq_produce( 1443 struct pq3etsec_softc *sc, 1444 struct pq3etsec_rxqueue *rxq) 1445 { 1446 volatile struct rxbd *producer = rxq->rxq_producer; 1447 #if 0 1448 size_t inuse = rxq->rxq_inuse; 1449 #endif 1450 while (rxq->rxq_inuse < rxq->rxq_threshold) { 1451 struct mbuf *m; 1452 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 1453 if (m == NULL) { 1454 m = pq3etsec_rx_buf_alloc(sc); 1455 if (m == NULL) { 1456 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__); 1457 break; 1458 } 1459 } 1460 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1461 KASSERT(map); 1462 1463 #ifdef ETSEC_DEBUG 1464 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL); 1465 rxq->rxq_mbufs[producer-rxq->rxq_first] = m; 1466 #endif 1467 1468 /* rxbd_len is write-only by the ETSEC */ 1469 producer->rxbd_bufptr = map->dm_segs[0].ds_addr; 1470 membar_producer(); 1471 producer->rxbd_flags |= RXBD_E; 1472 if (__predict_false(rxq->rxq_mhead == NULL)) { 1473 KASSERT(producer == rxq->rxq_consumer); 1474 rxq->rxq_mconsumer = m; 1475 } 1476 *rxq->rxq_mtail = m; 1477 rxq->rxq_mtail = &m->m_next; 1478 m->m_len = MCLBYTES; 1479 m->m_next = NULL; 1480 rxq->rxq_inuse++; 1481 if (++producer == rxq->rxq_last) { 1482 membar_producer(); 1483 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1484 rxq->rxq_last - rxq->rxq_producer); 1485 producer = rxq->rxq_producer = rxq->rxq_first; 1486 } 1487 } 1488 if (producer != rxq->rxq_producer) { 1489 membar_producer(); 1490 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1491 producer - rxq->rxq_producer); 1492 rxq->rxq_producer = producer; 1493 } 1494 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1495 if (qhlt) { 1496 KASSERT(qhlt & rxq->rxq_qmask); 1497 sc->sc_ev_rx_stall.ev_count++; 1498 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1499 } 1500 #if 0 1501 aprint_normal_dev(sc->sc_dev, 1502 "%s: buffers inuse went from %zu to %zu\n", 1503 __func__, inuse, rxq->rxq_inuse); 1504 #endif 1505 return true; 1506 } 1507 1508 static bool 1509 pq3etsec_rx_offload( 1510 struct pq3etsec_softc *sc, 1511 
struct mbuf *m, 1512 const struct rxfcb *fcb) 1513 { 1514 if (fcb->rxfcb_flags & RXFCB_VLN) { 1515 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl, 1516 m_freem(m); return false); 1517 } 1518 if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1519 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0) 1520 return true; 1521 int csum_flags = 0; 1522 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) { 1523 csum_flags |= M_CSUM_IPv4; 1524 if (fcb->rxfcb_flags & RXFCB_EIP) 1525 csum_flags |= M_CSUM_IPv4_BAD; 1526 } 1527 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1528 int ipv_flags; 1529 if (fcb->rxfcb_flags & RXFCB_IP6) 1530 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6; 1531 else 1532 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4; 1533 if (fcb->rxfcb_pro == IPPROTO_TCP) { 1534 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags; 1535 } else { 1536 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags; 1537 } 1538 if (fcb->rxfcb_flags & RXFCB_ETU) 1539 csum_flags |= M_CSUM_TCP_UDP_BAD; 1540 } 1541 1542 m->m_pkthdr.csum_flags = csum_flags; 1543 return true; 1544 } 1545 1546 static void 1547 pq3etsec_rx_input( 1548 struct pq3etsec_softc *sc, 1549 struct mbuf *m, 1550 uint16_t rxbd_flags) 1551 { 1552 struct ifnet * const ifp = &sc->sc_if; 1553 1554 pq3etsec_rx_map_unload(sc, m); 1555 1556 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1557 struct rxfcb fcb = *mtod(m, struct rxfcb *); 1558 if (!pq3etsec_rx_offload(sc, m, &fcb)) 1559 return; 1560 } 1561 m_adj(m, sc->sc_rx_adjlen); 1562 1563 if (rxbd_flags & RXBD_M) 1564 m->m_flags |= M_PROMISC; 1565 if (rxbd_flags & RXBD_BC) 1566 m->m_flags |= M_BCAST; 1567 if (rxbd_flags & RXBD_MC) 1568 m->m_flags |= M_MCAST; 1569 m->m_flags |= M_HASFCS; 1570 m_set_rcvif(m, &sc->sc_if); 1571 1572 ifp->if_ipackets++; 1573 ifp->if_ibytes += m->m_pkthdr.len; 1574 1575 /* 1576 * Let's give it to the network subsystm to deal with. 1577 */ 1578 int s = splnet(); 1579 bpf_mtap(ifp, m); 1580 if_input(ifp, m); 1581 splx(s); 1582 } 1583 1584 static void 1585 pq3etsec_rxq_consume( 1586 struct pq3etsec_softc *sc, 1587 struct pq3etsec_rxqueue *rxq) 1588 { 1589 struct ifnet * const ifp = &sc->sc_if; 1590 volatile struct rxbd *consumer = rxq->rxq_consumer; 1591 size_t rxconsumed = 0; 1592 1593 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1594 1595 for (;;) { 1596 if (consumer == rxq->rxq_producer) { 1597 rxq->rxq_consumer = consumer; 1598 rxq->rxq_inuse -= rxconsumed; 1599 KASSERT(rxq->rxq_inuse == 0); 1600 return; 1601 } 1602 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1603 const uint16_t rxbd_flags = consumer->rxbd_flags; 1604 if (rxbd_flags & RXBD_E) { 1605 rxq->rxq_consumer = consumer; 1606 rxq->rxq_inuse -= rxconsumed; 1607 return; 1608 } 1609 KASSERT(rxq->rxq_mconsumer != NULL); 1610 #ifdef ETSEC_DEBUG 1611 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1612 #endif 1613 #if 0 1614 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1615 __func__, 1616 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1617 mtod(rxq->rxq_mconsumer, int *)[0], 1618 mtod(rxq->rxq_mconsumer, int *)[1], 1619 mtod(rxq->rxq_mconsumer, int *)[2], 1620 mtod(rxq->rxq_mconsumer, int *)[3]); 1621 #endif 1622 /* 1623 * We own this packet again. Clear all flags except wrap. 1624 */ 1625 rxconsumed++; 1626 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I); 1627 1628 /* 1629 * If this descriptor has the LAST bit set and no errors, 1630 * it's a valid input packet. 
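		 * The frame may span several buffers: rxq_mhead through
		 * rxq_mconsumer are handed up as one chain, the total length
		 * comes from this descriptor's rxbd_len, and only the last
		 * mbuf's m_len needs trimming since every earlier buffer was
		 * filled to MCLBYTES.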
1631 */ 1632 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) { 1633 size_t rxbd_len = consumer->rxbd_len; 1634 struct mbuf *m = rxq->rxq_mhead; 1635 struct mbuf *m_last = rxq->rxq_mconsumer; 1636 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1637 rxq->rxq_mtail = &rxq->rxq_mhead; 1638 rxq->rxq_mconsumer = rxq->rxq_mhead; 1639 m_last->m_next = NULL; 1640 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1641 m->m_pkthdr.len = rxbd_len; 1642 pq3etsec_rx_input(sc, m, rxbd_flags); 1643 } else if (rxbd_flags & RXBD_L) { 1644 KASSERT(rxbd_flags & RXBD_ERRORS); 1645 struct mbuf *m; 1646 /* 1647 * We encountered an error, take the mbufs and add 1648 * then to the rx bufcache so we can reuse them. 1649 */ 1650 ifp->if_ierrors++; 1651 for (m = rxq->rxq_mhead; 1652 m != rxq->rxq_mconsumer; 1653 m = m->m_next) { 1654 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1655 } 1656 m = rxq->rxq_mconsumer; 1657 if ((rxq->rxq_mhead = m->m_next) == NULL) 1658 rxq->rxq_mtail = &rxq->rxq_mhead; 1659 rxq->rxq_mconsumer = m->m_next; 1660 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1661 } else { 1662 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1663 } 1664 #ifdef ETSEC_DEBUG 1665 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1666 #endif 1667 1668 /* 1669 * Wrap at the last entry! 1670 */ 1671 if (rxbd_flags & RXBD_W) { 1672 KASSERT(consumer + 1 == rxq->rxq_last); 1673 consumer = rxq->rxq_first; 1674 } else { 1675 consumer++; 1676 } 1677 #ifdef ETSEC_DEBUG 1678 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1679 #endif 1680 } 1681 } 1682 1683 static void 1684 pq3etsec_rxq_purge( 1685 struct pq3etsec_softc *sc, 1686 struct pq3etsec_rxqueue *rxq, 1687 bool discard) 1688 { 1689 struct mbuf *m; 1690 1691 if ((m = rxq->rxq_mhead) != NULL) { 1692 #ifdef ETSEC_DEBUG 1693 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1694 #endif 1695 1696 if (discard) { 1697 pq3etsec_rx_map_unload(sc, m); 1698 m_freem(m); 1699 } else { 1700 while (m != NULL) { 1701 struct mbuf *m0 = m->m_next; 1702 m->m_next = NULL; 1703 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1704 m = m0; 1705 } 1706 } 1707 1708 } 1709 1710 rxq->rxq_mconsumer = NULL; 1711 rxq->rxq_mhead = NULL; 1712 rxq->rxq_mtail = &rxq->rxq_mhead; 1713 rxq->rxq_inuse = 0; 1714 } 1715 1716 static void 1717 pq3etsec_rxq_reset( 1718 struct pq3etsec_softc *sc, 1719 struct pq3etsec_rxqueue *rxq) 1720 { 1721 /* 1722 * sync all the descriptors 1723 */ 1724 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1725 rxq->rxq_last - rxq->rxq_first); 1726 1727 /* 1728 * Make sure we own all descriptors in the ring. 1729 */ 1730 volatile struct rxbd *rxbd; 1731 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1732 rxbd->rxbd_flags = RXBD_I; 1733 } 1734 1735 /* 1736 * Last descriptor has the wrap flag. 1737 */ 1738 rxbd->rxbd_flags = RXBD_W|RXBD_I; 1739 1740 /* 1741 * Reset the producer consumer indexes. 
1742 */ 1743 rxq->rxq_consumer = rxq->rxq_first; 1744 rxq->rxq_producer = rxq->rxq_first; 1745 rxq->rxq_inuse = 0; 1746 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1747 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1748 1749 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY; 1750 1751 /* 1752 * Restart the transmit at the first descriptor 1753 */ 1754 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1755 } 1756 1757 static int 1758 pq3etsec_rxq_attach( 1759 struct pq3etsec_softc *sc, 1760 struct pq3etsec_rxqueue *rxq, 1761 u_int qno) 1762 { 1763 size_t map_size = PAGE_SIZE; 1764 size_t desc_count = map_size / sizeof(struct rxbd); 1765 int error; 1766 void *descs; 1767 1768 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1769 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1770 if (error) 1771 return error; 1772 1773 memset(descs, 0, map_size); 1774 rxq->rxq_first = descs; 1775 rxq->rxq_last = rxq->rxq_first + desc_count; 1776 rxq->rxq_consumer = descs; 1777 rxq->rxq_producer = descs; 1778 1779 pq3etsec_rxq_purge(sc, rxq, true); 1780 pq3etsec_rxq_reset(sc, rxq); 1781 1782 rxq->rxq_reg_rbase = RBASEn(qno); 1783 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1784 1785 return 0; 1786 } 1787 1788 static bool 1789 pq3etsec_txq_active_p( 1790 struct pq3etsec_softc * const sc, 1791 struct pq3etsec_txqueue *txq) 1792 { 1793 return !IF_IS_EMPTY(&txq->txq_mbufs); 1794 } 1795 1796 static bool 1797 pq3etsec_txq_fillable_p( 1798 struct pq3etsec_softc * const sc, 1799 struct pq3etsec_txqueue *txq) 1800 { 1801 return txq->txq_free >= txq->txq_threshold; 1802 } 1803 1804 static int 1805 pq3etsec_txq_attach( 1806 struct pq3etsec_softc *sc, 1807 struct pq3etsec_txqueue *txq, 1808 u_int qno) 1809 { 1810 size_t map_size = PAGE_SIZE; 1811 size_t desc_count = map_size / sizeof(struct txbd); 1812 int error; 1813 void *descs; 1814 1815 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1816 &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1817 if (error) 1818 return error; 1819 1820 memset(descs, 0, map_size); 1821 txq->txq_first = descs; 1822 txq->txq_last = txq->txq_first + desc_count; 1823 txq->txq_consumer = descs; 1824 txq->txq_producer = descs; 1825 1826 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1827 1828 txq->txq_reg_tbase = TBASEn(qno); 1829 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1830 1831 pq3etsec_txq_reset(sc, txq); 1832 1833 return 0; 1834 } 1835 1836 static int 1837 pq3etsec_txq_map_load( 1838 struct pq3etsec_softc *sc, 1839 struct pq3etsec_txqueue *txq, 1840 struct mbuf *m) 1841 { 1842 bus_dmamap_t map; 1843 int error; 1844 1845 map = M_GETCTX(m, bus_dmamap_t); 1846 if (map != NULL) 1847 return 0; 1848 1849 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1850 if (map == NULL) 1851 return ENOMEM; 1852 1853 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1854 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1855 if (error) 1856 return error; 1857 1858 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1859 BUS_DMASYNC_PREWRITE); 1860 M_SETCTX(m, map); 1861 return 0; 1862 } 1863 1864 static void 1865 pq3etsec_txq_map_unload( 1866 struct pq3etsec_softc *sc, 1867 struct pq3etsec_txqueue *txq, 1868 struct mbuf *m) 1869 { 1870 KASSERT(m); 1871 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1872 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1873 BUS_DMASYNC_POSTWRITE); 1874 bus_dmamap_unload(sc->sc_dmat, map); 1875 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1876 } 1877 1878 static bool 1879 pq3etsec_txq_produce( 1880 struct pq3etsec_softc *sc, 1881 struct 
pq3etsec_txqueue *txq, 1882 struct mbuf *m) 1883 { 1884 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1885 1886 if (map->dm_nsegs > txq->txq_free) 1887 return false; 1888 1889 /* 1890 * TCP Offload flag must be set in the first descriptor. 1891 */ 1892 volatile struct txbd *producer = txq->txq_producer; 1893 uint16_t last_flags = TXBD_L; 1894 uint16_t first_flags = TXBD_R 1895 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1896 1897 /* 1898 * If we've produced enough descriptors without consuming any 1899 * we need to ask for an interrupt to reclaim some. 1900 */ 1901 txq->txq_lastintr += map->dm_nsegs; 1902 if (ETSEC_IC_TX_ENABLED(sc) 1903 || txq->txq_lastintr >= txq->txq_threshold 1904 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1905 txq->txq_lastintr = 0; 1906 last_flags |= TXBD_I; 1907 } 1908 1909 #ifdef ETSEC_DEBUG 1910 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1911 #endif 1912 KASSERT(producer != txq->txq_last); 1913 producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1914 producer->txbd_len = map->dm_segs[0].ds_len; 1915 1916 if (map->dm_nsegs > 1) { 1917 volatile struct txbd *start = producer + 1; 1918 size_t count = map->dm_nsegs - 1; 1919 for (u_int i = 1; i < map->dm_nsegs; i++) { 1920 if (__predict_false(++producer == txq->txq_last)) { 1921 producer = txq->txq_first; 1922 if (start < txq->txq_last) { 1923 pq3etsec_txq_desc_presync(sc, txq, 1924 start, txq->txq_last - start); 1925 count -= txq->txq_last - start; 1926 } 1927 start = txq->txq_first; 1928 } 1929 #ifdef ETSEC_DEBUG 1930 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1931 #endif 1932 producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1933 producer->txbd_len = map->dm_segs[i].ds_len; 1934 producer->txbd_flags = TXBD_R 1935 | (producer->txbd_flags & TXBD_W) 1936 | (i == map->dm_nsegs - 1 ? last_flags : 0); 1937 #if 0 1938 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1939 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1940 #endif 1941 } 1942 pq3etsec_txq_desc_presync(sc, txq, start, count); 1943 } else { 1944 first_flags |= last_flags; 1945 } 1946 1947 membar_producer(); 1948 txq->txq_producer->txbd_flags = 1949 first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1950 #if 0 1951 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1952 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1953 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1954 #endif 1955 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1956 1957 /* 1958 * Reduce free count by the number of segments we consumed. 1959 */ 1960 txq->txq_free -= map->dm_nsegs; 1961 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1962 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0); 1963 KASSERT(producer->txbd_flags & TXBD_L); 1964 #ifdef ETSEC_DEBUG 1965 txq->txq_lmbufs[producer - txq->txq_first] = m; 1966 #endif 1967 1968 #if 0 1969 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n", 1970 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1971 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1972 #endif 1973 1974 if (++producer == txq->txq_last) 1975 txq->txq_producer = txq->txq_first; 1976 else 1977 txq->txq_producer = producer; 1978 IF_ENQUEUE(&txq->txq_mbufs, m); 1979 1980 /* 1981 * Restart the transmitter. 
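	 * Writing 1 to the queue's THLT bit in TSTAT (write-one-to-clear)
	 * takes the ring out of the halted state so the DMA engine goes
	 * back and fetches the descriptors we just produced.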
1982 */ 1983 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */ 1984 1985 return true; 1986 } 1987 1988 static void 1989 pq3etsec_tx_offload( 1990 struct pq3etsec_softc *sc, 1991 struct pq3etsec_txqueue *txq, 1992 struct mbuf **mp) 1993 { 1994 struct mbuf *m = *mp; 1995 u_int csum_flags = m->m_pkthdr.csum_flags; 1996 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m); 1997 1998 KASSERT(m->m_flags & M_PKTHDR); 1999 2000 /* 2001 * Let see if we are doing any offload first. 2002 */ 2003 if (csum_flags == 0 && vtag == 0) { 2004 m->m_flags &= ~M_HASFCB; 2005 return; 2006 } 2007 2008 uint16_t flags = 0; 2009 if (csum_flags & M_CSUM_IP) { 2010 flags |= TXFCB_IP 2011 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0) 2012 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0) 2013 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0) 2014 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0) 2015 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0); 2016 } 2017 if (vtag) { 2018 flags |= TXFCB_VLN; 2019 } 2020 if (flags == 0) { 2021 m->m_flags &= ~M_HASFCB; 2022 return; 2023 } 2024 2025 struct txfcb fcb; 2026 fcb.txfcb_flags = flags; 2027 if (csum_flags & M_CSUM_IPv4) 2028 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2029 else 2030 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data); 2031 fcb.txfcb_l3os = ETHER_HDR_LEN; 2032 fcb.txfcb_phcs = 0; 2033 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0; 2034 2035 #if 0 2036 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n", 2037 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 2038 fcb.txfcb_phcs, fcb.txfcb_vlctl); 2039 #endif 2040 2041 if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 2042 m->m_data -= sizeof(fcb); 2043 m->m_len += sizeof(fcb); 2044 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 2045 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 2046 m->m_data = m->m_pktdat; 2047 m->m_len += sizeof(fcb); 2048 } else { 2049 struct mbuf *mn; 2050 MGET(mn, M_DONTWAIT, m->m_type); 2051 if (mn == NULL) { 2052 if (csum_flags & M_CSUM_IP4) { 2053 #ifdef INET 2054 ip_undefer_csum(m, ETHER_HDR_LEN, 2055 csum_flags & M_CSUM_IP4); 2056 #else 2057 panic("%s: impossible M_CSUM flags %#x", 2058 device_xname(sc->sc_dev), csum_flags); 2059 #endif 2060 } else if (csum_flags & M_CSUM_IP6) { 2061 #ifdef INET6 2062 ip6_undefer_csum(m, ETHER_HDR_LEN, 2063 csum_flags & M_CSUM_IP6); 2064 #else 2065 panic("%s: impossible M_CSUM flags %#x", 2066 device_xname(sc->sc_dev), csum_flags); 2067 #endif 2068 } else if (vtag) { 2069 } 2070 2071 m->m_flags &= ~M_HASFCB; 2072 return; 2073 } 2074 2075 M_MOVE_PKTHDR(mn, m); 2076 mn->m_next = m; 2077 m = mn; 2078 MH_ALIGN(m, sizeof(fcb)); 2079 m->m_len = sizeof(fcb); 2080 *mp = m; 2081 } 2082 m->m_pkthdr.len += sizeof(fcb); 2083 m->m_flags |= M_HASFCB; 2084 *mtod(m, struct txfcb *) = fcb; 2085 return; 2086 } 2087 2088 static bool 2089 pq3etsec_txq_enqueue( 2090 struct pq3etsec_softc *sc, 2091 struct pq3etsec_txqueue *txq) 2092 { 2093 for (;;) { 2094 if (IF_QFULL(&txq->txq_mbufs)) 2095 return false; 2096 struct mbuf *m = txq->txq_next; 2097 if (m == NULL) { 2098 int s = splnet(); 2099 IFQ_DEQUEUE(&sc->sc_if.if_snd, m); 2100 splx(s); 2101 if (m == NULL) 2102 return true; 2103 M_SETCTX(m, NULL); 2104 pq3etsec_tx_offload(sc, txq, &m); 2105 } else { 2106 txq->txq_next = NULL; 2107 } 2108 int error = pq3etsec_txq_map_load(sc, txq, m); 2109 if (error) { 2110 aprint_error_dev(sc->sc_dev, 2111 "discarded packet due to " 2112 "dmamap load failure: %d\n", error); 
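			/*
			 * The mbuf chain could not be mapped for DMA
			 * (e.g. no cached DMA map was available or the
			 * chain had too many segments); drop it and keep
			 * draining the send queue.
			 */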
2113 m_freem(m); 2114 continue; 2115 } 2116 KASSERT(txq->txq_next == NULL); 2117 if (!pq3etsec_txq_produce(sc, txq, m)) { 2118 txq->txq_next = m; 2119 return false; 2120 } 2121 KASSERT(txq->txq_next == NULL); 2122 } 2123 } 2124 2125 static bool 2126 pq3etsec_txq_consume( 2127 struct pq3etsec_softc *sc, 2128 struct pq3etsec_txqueue *txq) 2129 { 2130 struct ifnet * const ifp = &sc->sc_if; 2131 volatile struct txbd *consumer = txq->txq_consumer; 2132 size_t txfree = 0; 2133 2134 #if 0 2135 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 2136 #endif 2137 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 2138 2139 for (;;) { 2140 if (consumer == txq->txq_producer) { 2141 txq->txq_consumer = consumer; 2142 txq->txq_free += txfree; 2143 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2144 #if 0 2145 printf("%s: empty: freed %zu descriptors going form %zu to %zu\n", 2146 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 2147 #endif 2148 KASSERT(txq->txq_lastintr == 0); 2149 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 2150 return true; 2151 } 2152 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2153 const uint16_t txbd_flags = consumer->txbd_flags; 2154 if (txbd_flags & TXBD_R) { 2155 txq->txq_consumer = consumer; 2156 txq->txq_free += txfree; 2157 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2158 #if 0 2159 printf("%s: freed %zu descriptors\n", 2160 __func__, txfree); 2161 #endif 2162 return pq3etsec_txq_fillable_p(sc, txq); 2163 } 2164 2165 /* 2166 * If this is the last descriptor in the chain, get the 2167 * mbuf, free its dmamap, and free the mbuf chain itself. 2168 */ 2169 if (txbd_flags & TXBD_L) { 2170 struct mbuf *m; 2171 2172 IF_DEQUEUE(&txq->txq_mbufs, m); 2173 #ifdef ETSEC_DEBUG 2174 KASSERTMSG( 2175 m == txq->txq_lmbufs[consumer-txq->txq_first], 2176 "%s: %p [%u]: flags %#x m (%p) != %p (%p)", 2177 __func__, consumer, consumer - txq->txq_first, 2178 txbd_flags, m, 2179 &txq->txq_lmbufs[consumer-txq->txq_first], 2180 txq->txq_lmbufs[consumer-txq->txq_first]); 2181 #endif 2182 KASSERT(m); 2183 pq3etsec_txq_map_unload(sc, txq, m); 2184 #if 0 2185 printf("%s: mbuf %p: consumed a %u byte packet\n", 2186 __func__, m, m->m_pkthdr.len); 2187 #endif 2188 if (m->m_flags & M_HASFCB) 2189 m_adj(m, sizeof(struct txfcb)); 2190 bpf_mtap(ifp, m); 2191 ifp->if_opackets++; 2192 ifp->if_obytes += m->m_pkthdr.len; 2193 if (m->m_flags & M_MCAST) 2194 ifp->if_omcasts++; 2195 if (txbd_flags & TXBD_ERRORS) 2196 ifp->if_oerrors++; 2197 m_freem(m); 2198 #ifdef ETSEC_DEBUG 2199 txq->txq_lmbufs[consumer - txq->txq_first] = NULL; 2200 #endif 2201 } else { 2202 #ifdef ETSEC_DEBUG 2203 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL); 2204 #endif 2205 } 2206 2207 /* 2208 * We own this packet again. Clear all flags except wrap. 2209 */ 2210 txfree++; 2211 //consumer->txbd_flags = txbd_flags & TXBD_W; 2212 2213 /* 2214 * Wrap at the last entry! 
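		 * A descriptor with TXBD_W set is the physically last slot
		 * in the ring, so the scan continues at txq_first.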
2215 */ 2216 if (txbd_flags & TXBD_W) { 2217 KASSERT(consumer + 1 == txq->txq_last); 2218 consumer = txq->txq_first; 2219 } else { 2220 consumer++; 2221 KASSERT(consumer < txq->txq_last); 2222 } 2223 } 2224 } 2225 2226 static void 2227 pq3etsec_txq_purge( 2228 struct pq3etsec_softc *sc, 2229 struct pq3etsec_txqueue *txq) 2230 { 2231 struct mbuf *m; 2232 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0); 2233 2234 for (;;) { 2235 IF_DEQUEUE(&txq->txq_mbufs, m); 2236 if (m == NULL) 2237 break; 2238 pq3etsec_txq_map_unload(sc, txq, m); 2239 m_freem(m); 2240 } 2241 if ((m = txq->txq_next) != NULL) { 2242 txq->txq_next = NULL; 2243 pq3etsec_txq_map_unload(sc, txq, m); 2244 m_freem(m); 2245 } 2246 #ifdef ETSEC_DEBUG 2247 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs)); 2248 #endif 2249 } 2250 2251 static void 2252 pq3etsec_txq_reset( 2253 struct pq3etsec_softc *sc, 2254 struct pq3etsec_txqueue *txq) 2255 { 2256 /* 2257 * sync all the descriptors 2258 */ 2259 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first, 2260 txq->txq_last - txq->txq_first); 2261 2262 /* 2263 * Make sure we own all descriptors in the ring. 2264 */ 2265 volatile struct txbd *txbd; 2266 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) { 2267 txbd->txbd_flags = 0; 2268 } 2269 2270 /* 2271 * Last descriptor has the wrap flag. 2272 */ 2273 txbd->txbd_flags = TXBD_W; 2274 2275 /* 2276 * Reset the producer consumer indexes. 2277 */ 2278 txq->txq_consumer = txq->txq_first; 2279 txq->txq_producer = txq->txq_first; 2280 txq->txq_free = txq->txq_last - txq->txq_first - 1; 2281 txq->txq_threshold = txq->txq_free / 2; 2282 txq->txq_lastintr = 0; 2283 2284 /* 2285 * What do we want to get interrupted on? 2286 */ 2287 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE; 2288 2289 /* 2290 * Restart the transmit at the first descriptor 2291 */ 2292 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr); 2293 } 2294 2295 static void 2296 pq3etsec_ifstart(struct ifnet *ifp) 2297 { 2298 struct pq3etsec_softc * const sc = ifp->if_softc; 2299 2300 if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { 2301 return; 2302 } 2303 2304 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2305 softint_schedule(sc->sc_soft_ih); 2306 } 2307 2308 static void 2309 pq3etsec_tx_error( 2310 struct pq3etsec_softc * const sc) 2311 { 2312 struct pq3etsec_txqueue * const txq = &sc->sc_txq; 2313 2314 pq3etsec_txq_consume(sc, txq); 2315 2316 if (pq3etsec_txq_fillable_p(sc, txq)) 2317 sc->sc_if.if_flags &= ~IFF_OACTIVE; 2318 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) { 2319 } else if (sc->sc_txerrors & IEVENT_EBERR) { 2320 } 2321 2322 if (pq3etsec_txq_active_p(sc, txq)) 2323 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask); 2324 if (!pq3etsec_txq_enqueue(sc, txq)) { 2325 sc->sc_ev_tx_stall.ev_count++; 2326 sc->sc_if.if_flags |= IFF_OACTIVE; 2327 } 2328 2329 sc->sc_txerrors = 0; 2330 } 2331 2332 int 2333 pq3etsec_tx_intr(void *arg) 2334 { 2335 struct pq3etsec_softc * const sc = arg; 2336 2337 mutex_enter(sc->sc_hwlock); 2338 2339 sc->sc_ev_tx_intr.ev_count++; 2340 2341 uint32_t ievent = etsec_read(sc, IEVENT); 2342 ievent &= IEVENT_TXF|IEVENT_TXB; 2343 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2344 2345 #if 0 2346 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2347 __func__, ievent, etsec_read(sc, IMASK)); 2348 #endif 2349 2350 if (ievent == 0) { 2351 mutex_exit(sc->sc_hwlock); 2352 return 0; 2353 } 2354 2355 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB); 2356 
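	/*
	 * Defer the actual reclaim to the soft interrupt: TXF/TXB stay
	 * masked here, and pq3etsec_soft_intr() sets IEVENT_TXF in
	 * sc_imask again once the ring has been processed.
	 */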
atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2357 etsec_write(sc, IMASK, sc->sc_imask); 2358 softint_schedule(sc->sc_soft_ih); 2359 2360 mutex_exit(sc->sc_hwlock); 2361 2362 return 1; 2363 } 2364 2365 int 2366 pq3etsec_rx_intr(void *arg) 2367 { 2368 struct pq3etsec_softc * const sc = arg; 2369 2370 mutex_enter(sc->sc_hwlock); 2371 2372 sc->sc_ev_rx_intr.ev_count++; 2373 2374 uint32_t ievent = etsec_read(sc, IEVENT); 2375 ievent &= IEVENT_RXF|IEVENT_RXB; 2376 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2377 if (ievent == 0) { 2378 mutex_exit(sc->sc_hwlock); 2379 return 0; 2380 } 2381 2382 #if 0 2383 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent); 2384 #endif 2385 2386 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB); 2387 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR); 2388 etsec_write(sc, IMASK, sc->sc_imask); 2389 softint_schedule(sc->sc_soft_ih); 2390 2391 mutex_exit(sc->sc_hwlock); 2392 2393 return 1; 2394 } 2395 2396 int 2397 pq3etsec_error_intr(void *arg) 2398 { 2399 struct pq3etsec_softc * const sc = arg; 2400 2401 mutex_enter(sc->sc_hwlock); 2402 2403 sc->sc_ev_error_intr.ev_count++; 2404 2405 for (int rv = 0, soft_flags = 0;; rv = 1) { 2406 uint32_t ievent = etsec_read(sc, IEVENT); 2407 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB); 2408 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2409 if (ievent == 0) { 2410 if (soft_flags) { 2411 atomic_or_uint(&sc->sc_soft_flags, soft_flags); 2412 softint_schedule(sc->sc_soft_ih); 2413 } 2414 mutex_exit(sc->sc_hwlock); 2415 return rv; 2416 } 2417 #if 0 2418 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2419 __func__, ievent, etsec_read(sc, IMASK)); 2420 #endif 2421 2422 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) { 2423 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC); 2424 etsec_write(sc, IMASK, sc->sc_imask); 2425 wakeup(sc); 2426 } 2427 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) { 2428 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR); 2429 etsec_write(sc, IMASK, sc->sc_imask); 2430 wakeup(&sc->sc_mii); 2431 } 2432 if (ievent & IEVENT_BSY) { 2433 soft_flags |= SOFT_RXBSY; 2434 sc->sc_imask &= ~IEVENT_BSY; 2435 etsec_write(sc, IMASK, sc->sc_imask); 2436 } 2437 if (ievent & IEVENT_TXE) { 2438 soft_flags |= SOFT_TXERROR; 2439 sc->sc_imask &= ~IEVENT_TXE; 2440 sc->sc_txerrors |= ievent; 2441 } 2442 if (ievent & IEVENT_TXC) { 2443 sc->sc_ev_tx_pause.ev_count++; 2444 } 2445 if (ievent & IEVENT_RXC) { 2446 sc->sc_ev_rx_pause.ev_count++; 2447 } 2448 if (ievent & IEVENT_DPE) { 2449 soft_flags |= SOFT_RESET; 2450 sc->sc_imask &= ~IEVENT_DPE; 2451 etsec_write(sc, IMASK, sc->sc_imask); 2452 } 2453 } 2454 } 2455 2456 void 2457 pq3etsec_soft_intr(void *arg) 2458 { 2459 struct pq3etsec_softc * const sc = arg; 2460 struct ifnet * const ifp = &sc->sc_if; 2461 uint32_t imask = 0; 2462 2463 mutex_enter(sc->sc_lock); 2464 2465 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 2466 2467 sc->sc_ev_soft_intr.ev_count++; 2468 2469 if (soft_flags & SOFT_RESET) { 2470 int s = splnet(); 2471 pq3etsec_ifinit(ifp); 2472 splx(s); 2473 soft_flags = 0; 2474 } 2475 2476 if (soft_flags & SOFT_RXBSY) { 2477 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq; 2478 size_t threshold = 5 * rxq->rxq_threshold / 4; 2479 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2480 threshold = rxq->rxq_last - rxq->rxq_first - 1; 2481 } else { 2482 imask |= IEVENT_BSY; 2483 } 2484 aprint_normal_dev(sc->sc_dev, 2485 "increasing receive buffers from %zu to %zu\n", 2486 rxq->rxq_threshold, threshold); 2487 rxq->rxq_threshold = threshold; 
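		/*
		 * The enlarged buffer pool is refilled by the
		 * pq3etsec_rxq_produce() call further below.
		 */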
2488 } 2489 2490 if ((soft_flags & SOFT_TXINTR) 2491 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) { 2492 /* 2493 * Let's do what we came here for. Consume transmitted 2494 * packets off the the transmit ring. 2495 */ 2496 if (!pq3etsec_txq_consume(sc, &sc->sc_txq) 2497 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) { 2498 sc->sc_ev_tx_stall.ev_count++; 2499 ifp->if_flags |= IFF_OACTIVE; 2500 } else { 2501 ifp->if_flags &= ~IFF_OACTIVE; 2502 } 2503 imask |= IEVENT_TXF; 2504 } 2505 2506 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) { 2507 /* 2508 * Let's consume 2509 */ 2510 pq3etsec_rxq_consume(sc, &sc->sc_rxq); 2511 imask |= IEVENT_RXF; 2512 } 2513 2514 if (soft_flags & SOFT_TXERROR) { 2515 pq3etsec_tx_error(sc); 2516 imask |= IEVENT_TXE; 2517 } 2518 2519 if (ifp->if_flags & IFF_RUNNING) { 2520 pq3etsec_rxq_produce(sc, &sc->sc_rxq); 2521 mutex_spin_enter(sc->sc_hwlock); 2522 sc->sc_imask |= imask; 2523 etsec_write(sc, IMASK, sc->sc_imask); 2524 mutex_spin_exit(sc->sc_hwlock); 2525 } else { 2526 KASSERT((soft_flags & SOFT_RXBSY) == 0); 2527 } 2528 2529 mutex_exit(sc->sc_lock); 2530 } 2531 2532 static void 2533 pq3etsec_mii_tick(void *arg) 2534 { 2535 struct pq3etsec_softc * const sc = arg; 2536 mutex_enter(sc->sc_lock); 2537 callout_ack(&sc->sc_mii_callout); 2538 sc->sc_ev_mii_ticks.ev_count++; 2539 #ifdef DEBUG 2540 uint64_t now = mftb(); 2541 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) { 2542 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n", 2543 __func__, now - sc->sc_mii_last_tick); 2544 callout_stop(&sc->sc_mii_callout); 2545 } 2546 #endif 2547 mii_tick(&sc->sc_mii); 2548 int s = splnet(); 2549 if (sc->sc_soft_flags & SOFT_RESET) 2550 softint_schedule(sc->sc_soft_ih); 2551 splx(s); 2552 callout_schedule(&sc->sc_mii_callout, hz); 2553 #ifdef DEBUG 2554 sc->sc_mii_last_tick = now; 2555 #endif 2556 mutex_exit(sc->sc_lock); 2557 } 2558 2559 static void 2560 pq3etsec_set_ic_rx(struct pq3etsec_softc *sc) 2561 { 2562 uint32_t reg; 2563 2564 if (ETSEC_IC_RX_ENABLED(sc)) { 2565 reg = RXIC_ICEN; 2566 reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count); 2567 reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time); 2568 } else { 2569 /* Disable RX interrupt coalescing */ 2570 reg = 0; 2571 } 2572 2573 etsec_write(sc, RXIC, reg); 2574 } 2575 2576 static void 2577 pq3etsec_set_ic_tx(struct pq3etsec_softc *sc) 2578 { 2579 uint32_t reg; 2580 2581 if (ETSEC_IC_TX_ENABLED(sc)) { 2582 reg = TXIC_ICEN; 2583 reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count); 2584 reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time); 2585 } else { 2586 /* Disable TX interrupt coalescing */ 2587 reg = 0; 2588 } 2589 2590 etsec_write(sc, TXIC, reg); 2591 } 2592 2593 /* 2594 * sysctl 2595 */ 2596 static int 2597 pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep) 2598 { 2599 struct sysctlnode node = *rnode; 2600 struct pq3etsec_softc *sc = rnode->sysctl_data; 2601 int value = *valuep; 2602 int error; 2603 2604 node.sysctl_data = &value; 2605 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2606 if (error != 0 || newp == NULL) 2607 return error; 2608 2609 if (value < 0 || value > 65535) 2610 return EINVAL; 2611 2612 mutex_enter(sc->sc_lock); 2613 *valuep = value; 2614 if (valuep == &sc->sc_ic_rx_time) 2615 pq3etsec_set_ic_rx(sc); 2616 else 2617 pq3etsec_set_ic_tx(sc); 2618 mutex_exit(sc->sc_lock); 2619 2620 return 0; 2621 } 2622 2623 static int 2624 pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep) 2625 { 2626 struct sysctlnode node = *rnode; 2627 struct pq3etsec_softc *sc = rnode->sysctl_data; 2628 int value = *valuep; 2629 int error; 2630 2631 
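	/*
	 * Hand a local copy to sysctl_lookup(); the new value is only
	 * committed (under sc_lock) after it passes the 0-255 range
	 * check below.
	 */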
node.sysctl_data = &value; 2632 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2633 if (error != 0 || newp == NULL) 2634 return error; 2635 2636 if (value < 0 || value > 255) 2637 return EINVAL; 2638 2639 mutex_enter(sc->sc_lock); 2640 *valuep = value; 2641 if (valuep == &sc->sc_ic_rx_count) 2642 pq3etsec_set_ic_rx(sc); 2643 else 2644 pq3etsec_set_ic_tx(sc); 2645 mutex_exit(sc->sc_lock); 2646 2647 return 0; 2648 } 2649 2650 static int 2651 pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS) 2652 { 2653 struct pq3etsec_softc *sc = rnode->sysctl_data; 2654 2655 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2656 &sc->sc_ic_rx_time); 2657 } 2658 2659 static int 2660 pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS) 2661 { 2662 struct pq3etsec_softc *sc = rnode->sysctl_data; 2663 2664 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2665 &sc->sc_ic_rx_count); 2666 } 2667 2668 static int 2669 pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS) 2670 { 2671 struct pq3etsec_softc *sc = rnode->sysctl_data; 2672 2673 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2674 &sc->sc_ic_tx_time); 2675 } 2676 2677 static int 2678 pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS) 2679 { 2680 struct pq3etsec_softc *sc = rnode->sysctl_data; 2681 2682 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2683 &sc->sc_ic_tx_count); 2684 } 2685 2686 static void pq3etsec_sysctl_setup(struct sysctllog **clog, 2687 struct pq3etsec_softc *sc) 2688 { 2689 const struct sysctlnode *cnode, *rnode; 2690 2691 if (sysctl_createv(clog, 0, NULL, &rnode, 2692 CTLFLAG_PERMANENT, 2693 CTLTYPE_NODE, device_xname(sc->sc_dev), 2694 SYSCTL_DESCR("TSEC interface"), 2695 NULL, 0, NULL, 0, 2696 CTL_HW, CTL_CREATE, CTL_EOL) != 0) 2697 goto bad; 2698 2699 if (sysctl_createv(clog, 0, &rnode, &rnode, 2700 CTLFLAG_PERMANENT, 2701 CTLTYPE_NODE, "int_coal", 2702 SYSCTL_DESCR("Interrupts coalescing"), 2703 NULL, 0, NULL, 0, 2704 CTL_CREATE, CTL_EOL) != 0) 2705 goto bad; 2706 2707 if (sysctl_createv(clog, 0, &rnode, &cnode, 2708 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2709 CTLTYPE_INT, "rx_time", 2710 SYSCTL_DESCR("RX time threshold (0-65535)"), 2711 pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0, 2712 CTL_CREATE, CTL_EOL) != 0) 2713 goto bad; 2714 2715 if (sysctl_createv(clog, 0, &rnode, &cnode, 2716 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2717 CTLTYPE_INT, "rx_count", 2718 SYSCTL_DESCR("RX frame count threshold (0-255)"), 2719 pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0, 2720 CTL_CREATE, CTL_EOL) != 0) 2721 goto bad; 2722 2723 if (sysctl_createv(clog, 0, &rnode, &cnode, 2724 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2725 CTLTYPE_INT, "tx_time", 2726 SYSCTL_DESCR("TX time threshold (0-65535)"), 2727 pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0, 2728 CTL_CREATE, CTL_EOL) != 0) 2729 goto bad; 2730 2731 if (sysctl_createv(clog, 0, &rnode, &cnode, 2732 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2733 CTLTYPE_INT, "tx_count", 2734 SYSCTL_DESCR("TX frame count threshold (0-255)"), 2735 pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0, 2736 CTL_CREATE, CTL_EOL) != 0) 2737 goto bad; 2738 2739 return; 2740 2741 bad: 2742 aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n"); 2743 } 2744
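/*
 * The nodes created above can be inspected and tuned from userland,
 * for example (assuming the interface attached as tsec0; the actual
 * node is named after device_xname(sc->sc_dev)):
 *
 *	sysctl -w hw.tsec0.int_coal.rx_count=16
 *	sysctl -w hw.tsec0.int_coal.rx_time=768
 *
 * rx_count/tx_count accept 0-255 and rx_time/tx_time accept 0-65535;
 * writing an RX value reprograms RXIC via pq3etsec_set_ic_rx(), and
 * writing a TX value reprograms TXIC via pq3etsec_set_ic_tx().
 */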