1 /* $NetBSD: pq3etsec.c,v 1.50 2020/07/06 09:34:16 rin Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.50 2020/07/06 09:34:16 rin Exp $"); 39 40 #ifdef _KERNEL_OPT 41 #include "opt_inet.h" 42 #include "opt_mpc85xx.h" 43 #include "opt_multiprocessor.h" 44 #include "opt_net_mpsafe.h" 45 #endif 46 47 #include <sys/param.h> 48 #include <sys/cpu.h> 49 #include <sys/device.h> 50 #include <sys/mbuf.h> 51 #include <sys/ioctl.h> 52 #include <sys/intr.h> 53 #include <sys/bus.h> 54 #include <sys/kernel.h> 55 #include <sys/kmem.h> 56 #include <sys/proc.h> 57 #include <sys/atomic.h> 58 #include <sys/callout.h> 59 #include <sys/sysctl.h> 60 61 #include <net/if.h> 62 #include <net/if_dl.h> 63 #include <net/if_ether.h> 64 #include <net/if_media.h> 65 #include <net/bpf.h> 66 67 #include <dev/mii/miivar.h> 68 69 #ifdef INET 70 #include <netinet/in.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/in_offload.h> 74 #endif /* INET */ 75 #ifdef INET6 76 #include <netinet6/in6.h> 77 #include <netinet/ip6.h> 78 #endif 79 #include <netinet6/in6_offload.h> 80 81 #include <powerpc/spr.h> 82 #include <powerpc/booke/spr.h> 83 #include <powerpc/booke/cpuvar.h> 84 #include <powerpc/booke/e500var.h> 85 #include <powerpc/booke/e500reg.h> 86 #include <powerpc/booke/etsecreg.h> 87 88 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 89 90 #define ETSEC_MAXTXMBUFS 30 91 #define ETSEC_NTXSEGS 30 92 #define ETSEC_MAXRXMBUFS 511 93 #define ETSEC_MINRXMBUFS 32 94 #define ETSEC_NRXSEGS 1 95 96 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 97 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx \ 98 | IFCAP_CSUM_UDPv4_Rx \ 99 | IFCAP_CSUM_TCPv6_Rx \ 100 | IFCAP_CSUM_UDPv6_Rx) 101 102 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 103 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx \ 104 | IFCAP_CSUM_UDPv4_Tx \ 105 | IFCAP_CSUM_TCPv6_Tx \ 106 | IFCAP_CSUM_UDPv6_Tx) 107 108 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN | IFCAP_RCTRL_TUCSEN \ 109 | IFCAP_TCTRL_IPCSEN | IFCAP_TCTRL_TUCSEN) 110 111 #define M_CSUM_IP (M_CSUM_CIP | M_CSUM_CTU) 112 #define M_CSUM_IP6 (M_CSUM_TCPv6 | M_CSUM_UDPv6) 113 #define M_CSUM_TUP (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6) 114 #define M_CSUM_UDP (M_CSUM_UDPv4 | M_CSUM_UDPv6) 115 #define M_CSUM_IP4 (M_CSUM_IPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4) 116 #define M_CSUM_CIP (M_CSUM_IPv4) 117 #define M_CSUM_CTU (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6) 118 119 struct pq3etsec_txqueue { 120 bus_dmamap_t txq_descmap; 121 volatile struct txbd *txq_consumer; 122 volatile struct txbd *txq_producer; 123 volatile struct txbd *txq_first; 124 volatile struct txbd *txq_last; 125 struct ifqueue txq_mbufs; 126 struct mbuf *txq_next; 127 #ifdef ETSEC_DEBUG 128 struct mbuf *txq_lmbufs[512]; 129 #endif 130 uint32_t txq_qmask; 131 uint32_t txq_free; 132 uint32_t txq_threshold; 133 uint32_t txq_lastintr; 134 bus_size_t txq_reg_tbase; 135 bus_dma_segment_t txq_descmap_seg; 136 }; 137 138 struct pq3etsec_rxqueue { 139 bus_dmamap_t rxq_descmap; 140 volatile struct rxbd *rxq_consumer; 141 volatile struct rxbd *rxq_producer; 142 volatile struct rxbd *rxq_first; 143 volatile struct rxbd *rxq_last; 144 struct mbuf *rxq_mhead; 145 struct mbuf **rxq_mtail; 146 struct mbuf *rxq_mconsumer; 147 #ifdef ETSEC_DEBUG 148 struct mbuf *rxq_mbufs[512]; 149 #endif 150 uint32_t rxq_qmask; 151 uint32_t rxq_inuse; 152 uint32_t rxq_threshold; 153 bus_size_t rxq_reg_rbase; 154 bus_size_t rxq_reg_rbptr; 155 bus_dma_segment_t rxq_descmap_seg; 156 }; 157 158 struct pq3etsec_mapcache { 159 u_int dmc_nmaps; 160 u_int dmc_maxseg; 161 u_int dmc_maxmaps; 162 u_int dmc_maxmapsize; 163 bus_dmamap_t dmc_maps[0]; 164 }; 165 166 struct pq3etsec_softc { 167 device_t sc_dev; 168 device_t sc_mdio_dev; 169 struct ethercom sc_ec; 170 #define sc_if sc_ec.ec_if 171 struct mii_data sc_mii; 172 bus_space_tag_t sc_bst; 173 bus_space_handle_t sc_bsh; 174 bus_space_handle_t sc_mdio_bsh; 175 bus_dma_tag_t sc_dmat; 176 int sc_phy_addr; 177 prop_dictionary_t sc_intrmap; 178 uint32_t sc_intrmask; 179 180 uint32_t sc_soft_flags; 181 #define SOFT_RESET 0x0001 182 #define SOFT_RXINTR 0x0010 183 #define SOFT_RXBSY 0x0020 184 #define SOFT_TXINTR 0x0100 185 #define SOFT_TXERROR 0x0200 186 187 struct pq3etsec_txqueue sc_txq; 188 struct pq3etsec_rxqueue sc_rxq; 189 uint32_t sc_txerrors; 190 uint32_t sc_rxerrors; 191 192 size_t sc_rx_adjlen; 193 194 /* 195 * Copies of various ETSEC registers. 196 */ 197 uint32_t sc_imask; 198 uint32_t sc_maccfg1; 199 uint32_t sc_maccfg2; 200 uint32_t sc_maxfrm; 201 uint32_t sc_ecntrl; 202 uint32_t sc_dmactrl; 203 uint32_t sc_macstnaddr1; 204 uint32_t sc_macstnaddr2; 205 uint32_t sc_tctrl; 206 uint32_t sc_rctrl; 207 uint32_t sc_gaddr[16]; 208 uint64_t sc_macaddrs[15]; 209 210 void *sc_tx_ih; 211 void *sc_rx_ih; 212 void *sc_error_ih; 213 void *sc_soft_ih; 214 215 kmutex_t *sc_lock; 216 kmutex_t *sc_hwlock; 217 218 struct evcnt sc_ev_tx_stall; 219 struct evcnt sc_ev_tx_intr; 220 struct evcnt sc_ev_rx_stall; 221 struct evcnt sc_ev_rx_intr; 222 struct evcnt sc_ev_error_intr; 223 struct evcnt sc_ev_soft_intr; 224 struct evcnt sc_ev_tx_pause; 225 struct evcnt sc_ev_rx_pause; 226 struct evcnt sc_ev_mii_ticks; 227 228 struct callout sc_mii_callout; 229 uint64_t sc_mii_last_tick; 230 231 struct ifqueue sc_rx_bufcache; 232 struct pq3etsec_mapcache *sc_rx_mapcache; 233 struct pq3etsec_mapcache *sc_tx_mapcache; 234 235 /* Interrupt Coalescing parameters */ 236 int sc_ic_rx_time; 237 int sc_ic_rx_count; 238 int sc_ic_tx_time; 239 int sc_ic_tx_count; 240 }; 241 242 #define ETSEC_IC_RX_ENABLED(sc) \ 243 ((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0) 244 #define ETSEC_IC_TX_ENABLED(sc) \ 245 ((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0) 246 247 struct pq3mdio_softc { 248 device_t mdio_dev; 249 250 kmutex_t *mdio_lock; 251 252 bus_space_tag_t mdio_bst; 253 bus_space_handle_t mdio_bsh; 254 }; 255 256 static int pq3etsec_match(device_t, cfdata_t, void *); 257 static void pq3etsec_attach(device_t, device_t, void *); 258 259 static int pq3mdio_match(device_t, cfdata_t, void *); 260 static void pq3mdio_attach(device_t, device_t, void *); 261 262 static void pq3etsec_ifstart(struct ifnet *); 263 static void pq3etsec_ifwatchdog(struct ifnet *); 264 static int pq3etsec_ifinit(struct ifnet *); 265 static void pq3etsec_ifstop(struct ifnet *, int); 266 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 267 268 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 269 struct pq3etsec_mapcache **, size_t, size_t, size_t); 270 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 271 struct pq3etsec_mapcache *); 272 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 273 struct pq3etsec_mapcache *); 274 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 275 struct pq3etsec_mapcache *, bus_dmamap_t); 276 277 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 278 struct pq3etsec_txqueue *, u_int); 279 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 280 struct pq3etsec_txqueue *); 281 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 282 struct pq3etsec_txqueue *); 283 static bool pq3etsec_txq_consume(struct pq3etsec_softc *, 284 struct pq3etsec_txqueue *); 285 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 286 struct pq3etsec_txqueue *, struct mbuf *m); 287 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 288 struct pq3etsec_txqueue *); 289 290 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 291 struct pq3etsec_rxqueue *, u_int); 292 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 293 struct pq3etsec_rxqueue *); 294 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 295 struct pq3etsec_rxqueue *, bool); 296 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 297 struct pq3etsec_rxqueue *); 298 299 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 300 301 static void pq3etsec_mii_tick(void *); 302 static int pq3etsec_rx_intr(void *); 303 static int pq3etsec_tx_intr(void *); 304 static int pq3etsec_error_intr(void *); 305 static void pq3etsec_soft_intr(void *); 306 307 static void pq3etsec_set_ic_rx(struct pq3etsec_softc *); 308 static void pq3etsec_set_ic_tx(struct pq3etsec_softc *); 309 310 static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *); 311 312 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 313 pq3etsec_match, pq3etsec_attach, NULL, NULL); 314 315 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc), 316 pq3mdio_match, pq3mdio_attach, NULL, NULL); 317 318 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc), 319 pq3mdio_match, pq3mdio_attach, NULL, NULL); 320 321 static inline uint32_t 322 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off) 323 { 324 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off); 325 } 326 327 static inline void 328 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data) 329 { 330 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data); 331 } 332 333 static inline uint32_t 334 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 335 { 336 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 337 } 338 339 static int 340 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 341 { 342 return strcmp(cf->cf_name, "mdio") == 0; 343 } 344 345 static int 346 pq3mdio_match(device_t parent, cfdata_t cf, void *aux) 347 { 348 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 349 const bool p1025_p = (svr == (SVR_P1025v1 >> 16) 350 || svr == (SVR_P1016v1 >> 16)); 351 352 if (device_is_a(parent, "cpunode")) { 353 if (!p1025_p 354 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 355 return 0; 356 357 return 1; 358 } 359 360 if (device_is_a(parent, "tsec")) { 361 if (p1025_p 362 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 363 return 0; 364 365 return 1; 366 } 367 368 return 0; 369 } 370 371 static void 372 pq3mdio_attach(device_t parent, device_t self, void *aux) 373 { 374 struct pq3mdio_softc * const mdio = device_private(self); 375 struct cpunode_attach_args * const cna = aux; 376 struct cpunode_locators * const cnl = &cna->cna_locs; 377 378 mdio->mdio_dev = self; 379 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 380 381 if (device_is_a(parent, "cpunode")) { 382 struct cpunode_softc * const psc = device_private(parent); 383 psc->sc_children |= cna->cna_childmask; 384 385 mdio->mdio_bst = cna->cna_memt; 386 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr, 387 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) { 388 aprint_error(": error mapping registers @ %#x\n", 389 cnl->cnl_addr); 390 return; 391 } 392 } else { 393 struct pq3etsec_softc * const sc = device_private(parent); 394 395 KASSERT(device_is_a(parent, "tsec")); 396 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE 397 || cnl->cnl_addr == ETSEC2_BASE 398 || cnl->cnl_addr == ETSEC3_BASE 399 || cnl->cnl_addr == ETSEC4_BASE, 400 "unknown tsec addr %x", cnl->cnl_addr); 401 402 mdio->mdio_bst = sc->sc_bst; 403 mdio->mdio_bsh = sc->sc_bsh; 404 } 405 406 aprint_normal("\n"); 407 } 408 409 static int 410 pq3mdio_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 411 { 412 struct pq3mdio_softc * const mdio = device_private(self); 413 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 414 415 mutex_enter(mdio->mdio_lock); 416 417 etsec_mdio_write(mdio, MIIMADD, 418 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 419 420 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 421 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ); 422 423 while (etsec_mdio_read(mdio, MIIMIND) != 0) { 424 delay(1); 425 } 426 *val = etsec_mdio_read(mdio, MIIMSTAT) &0xffff; 427 428 if (miimcom == MIIMCOM_SCAN) 429 etsec_mdio_write(mdio, MIIMCOM, miimcom); 430 431 #if 0 432 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 433 __func__, phy, reg, data); 434 #endif 435 mutex_exit(mdio->mdio_lock); 436 return 0; 437 } 438 439 static int 440 pq3mdio_mii_writereg(device_t self, int phy, int reg, uint16_t data) 441 { 442 struct pq3mdio_softc * const mdio = device_private(self); 443 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 444 445 #if 0 446 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 447 __func__, phy, reg, data); 448 #endif 449 450 mutex_enter(mdio->mdio_lock); 451 452 etsec_mdio_write(mdio, MIIMADD, 453 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 454 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 455 etsec_mdio_write(mdio, MIIMCON, data); 456 457 int timo = 1000; /* 1ms */ 458 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 459 delay(1); 460 } 461 462 if (miimcom == MIIMCOM_SCAN) 463 etsec_mdio_write(mdio, MIIMCOM, miimcom); 464 465 mutex_exit(mdio->mdio_lock); 466 467 return 0; 468 } 469 470 static inline void 471 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 472 { 473 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 474 } 475 476 static void 477 pq3etsec_mii_statchg(struct ifnet *ifp) 478 { 479 struct pq3etsec_softc * const sc = ifp->if_softc; 480 struct mii_data * const mii = &sc->sc_mii; 481 482 uint32_t maccfg1 = sc->sc_maccfg1; 483 uint32_t maccfg2 = sc->sc_maccfg2; 484 uint32_t ecntrl = sc->sc_ecntrl; 485 486 maccfg1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 487 maccfg2 &= ~(MACCFG2_IFMODE | MACCFG2_FD); 488 489 if (sc->sc_mii.mii_media_active & IFM_FDX) { 490 maccfg2 |= MACCFG2_FD; 491 } 492 493 /* 494 * Now deal with the flow control bits. 495 */ 496 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 497 && (mii->mii_media_active & IFM_ETH_FMASK)) { 498 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 499 maccfg1 |= MACCFG1_RX_FLOW; 500 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 501 maccfg1 |= MACCFG1_TX_FLOW; 502 } 503 504 /* 505 * Now deal with the speed. 506 */ 507 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 508 maccfg2 |= MACCFG2_IFMODE_GMII; 509 } else { 510 maccfg2 |= MACCFG2_IFMODE_MII; 511 ecntrl &= ~ECNTRL_R100M; 512 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 513 ecntrl |= ECNTRL_R100M; 514 } 515 } 516 517 /* 518 * If things are different, re-init things. 519 */ 520 if (maccfg1 != sc->sc_maccfg1 521 || maccfg2 != sc->sc_maccfg2 522 || ecntrl != sc->sc_ecntrl) { 523 if (sc->sc_if.if_flags & IFF_RUNNING) 524 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 525 sc->sc_maccfg1 = maccfg1; 526 sc->sc_maccfg2 = maccfg2; 527 sc->sc_ecntrl = ecntrl; 528 } 529 } 530 531 #if 0 532 static void 533 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 534 { 535 struct pq3etsec_softc * const sc = ifp->if_softc; 536 537 mii_pollstat(&sc->sc_mii); 538 ether_mediastatus(ifp, ifmr); 539 ifmr->ifm_status = sc->sc_mii.mii_media_status; 540 ifmr->ifm_active = sc->sc_mii.mii_media_active; 541 } 542 543 static int 544 pq3etsec_mediachange(struct ifnet *ifp) 545 { 546 struct pq3etsec_softc * const sc = ifp->if_softc; 547 548 if ((ifp->if_flags & IFF_UP) == 0) 549 return 0; 550 551 int rv = mii_mediachg(&sc->sc_mii); 552 return (rv == ENXIO) ? 0 : rv; 553 } 554 #endif 555 556 static int 557 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 558 { 559 560 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 561 return 0; 562 563 return 1; 564 } 565 566 static void 567 pq3etsec_attach(device_t parent, device_t self, void *aux) 568 { 569 struct cpunode_softc * const psc = device_private(parent); 570 struct pq3etsec_softc * const sc = device_private(self); 571 struct mii_data * const mii = &sc->sc_mii; 572 struct cpunode_attach_args * const cna = aux; 573 struct cpunode_locators * const cnl = &cna->cna_locs; 574 cfdata_t cf = device_cfdata(self); 575 int error; 576 577 psc->sc_children |= cna->cna_childmask; 578 sc->sc_dev = self; 579 sc->sc_bst = cna->cna_memt; 580 sc->sc_dmat = &booke_bus_dma_tag; 581 582 /* 583 * Pull out the mdio bus and phy we are supposed to use. 584 */ 585 const int mdio = cf->cf_loc[CPUNODECF_MDIO]; 586 const int phy = cf->cf_loc[CPUNODECF_PHY]; 587 if (mdio != CPUNODECF_MDIO_DEFAULT) 588 aprint_normal(" mdio %d", mdio); 589 590 /* 591 * See if the phy is in the config file... 592 */ 593 if (phy != CPUNODECF_PHY_DEFAULT) { 594 sc->sc_phy_addr = phy; 595 } else { 596 unsigned char prop_name[20]; 597 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 598 cnl->cnl_instance); 599 sc->sc_phy_addr = board_info_get_number(prop_name); 600 } 601 if (sc->sc_phy_addr != MII_PHY_ANY) 602 aprint_normal(" phy %d", sc->sc_phy_addr); 603 604 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 605 &sc->sc_bsh); 606 if (error) { 607 aprint_error(": error mapping registers: %d\n", error); 608 return; 609 } 610 611 /* 612 * Assume firmware has aready set the mac address and fetch it 613 * before we reinit it. 614 */ 615 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 616 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 617 sc->sc_rctrl = RCTRL_DEFAULT; 618 sc->sc_ecntrl = etsec_read(sc, ECNTRL); 619 sc->sc_maccfg1 = etsec_read(sc, MACCFG1); 620 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT; 621 622 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 623 size_t len; 624 const uint8_t *mac_addr = 625 board_info_get_data("tsec-mac-addr-base", &len); 626 KASSERT(len == ETHER_ADDR_LEN); 627 sc->sc_macstnaddr2 = 628 (mac_addr[1] << 24) 629 | (mac_addr[0] << 16); 630 sc->sc_macstnaddr1 = 631 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 632 | (mac_addr[4] << 16) 633 | (mac_addr[3] << 8) 634 | (mac_addr[2] << 0); 635 #if 0 636 aprint_error(": mac-address unknown\n"); 637 return; 638 #endif 639 } 640 641 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 642 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 643 644 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 645 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 646 647 /* Disable interrupts */ 648 etsec_write(sc, IMASK, 0); 649 650 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 651 if (error) { 652 aprint_error(": failed to init rxq: %d\n", error); 653 goto fail_1; 654 } 655 656 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 657 if (error) { 658 aprint_error(": failed to init txq: %d\n", error); 659 goto fail_2; 660 } 661 662 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 663 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 664 if (error) { 665 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 666 goto fail_3; 667 } 668 669 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 670 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 671 if (error) { 672 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 673 goto fail_4; 674 } 675 676 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 677 pq3etsec_tx_intr, sc); 678 if (sc->sc_tx_ih == NULL) { 679 aprint_error(": failed to establish tx interrupt: %d\n", 680 cnl->cnl_intrs[0]); 681 goto fail_5; 682 } 683 684 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 685 pq3etsec_rx_intr, sc); 686 if (sc->sc_rx_ih == NULL) { 687 aprint_error(": failed to establish rx interrupt: %d\n", 688 cnl->cnl_intrs[1]); 689 goto fail_6; 690 } 691 692 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 693 pq3etsec_error_intr, sc); 694 if (sc->sc_error_ih == NULL) { 695 aprint_error(": failed to establish error interrupt: %d\n", 696 cnl->cnl_intrs[2]); 697 goto fail_7; 698 } 699 700 int softint_flags = SOFTINT_NET; 701 #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE) 702 softint_flags |= SOFTINT_MPSAFE; 703 #endif /* !MULTIPROCESSOR || NET_MPSAFE */ 704 sc->sc_soft_ih = softint_establish(softint_flags, 705 pq3etsec_soft_intr, sc); 706 if (sc->sc_soft_ih == NULL) { 707 aprint_error(": failed to establish soft interrupt\n"); 708 goto fail_8; 709 } 710 711 /* 712 * If there was no MDIO 713 */ 714 if (mdio == CPUNODECF_MDIO_DEFAULT) { 715 aprint_normal("\n"); 716 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna); 717 if (mdio_cf != NULL) { 718 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL); 719 } 720 } else { 721 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio); 722 if (sc->sc_mdio_dev == NULL) { 723 aprint_error(": failed to locate mdio device\n"); 724 goto fail_9; 725 } 726 aprint_normal("\n"); 727 } 728 729 etsec_write(sc, ATTR, ATTR_DEFAULT); 730 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 731 732 /* Enable interrupt coalesing */ 733 sc->sc_ic_rx_time = 768; 734 sc->sc_ic_rx_count = 16; 735 sc->sc_ic_tx_time = 768; 736 sc->sc_ic_tx_count = 16; 737 pq3etsec_set_ic_rx(sc); 738 pq3etsec_set_ic_tx(sc); 739 740 char enaddr[ETHER_ADDR_LEN] = { 741 [0] = sc->sc_macstnaddr2 >> 16, 742 [1] = sc->sc_macstnaddr2 >> 24, 743 [2] = sc->sc_macstnaddr1 >> 0, 744 [3] = sc->sc_macstnaddr1 >> 8, 745 [4] = sc->sc_macstnaddr1 >> 16, 746 [5] = sc->sc_macstnaddr1 >> 24, 747 }; 748 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 749 ether_sprintf(enaddr)); 750 751 const char * const xname = device_xname(sc->sc_dev); 752 struct ethercom * const ec = &sc->sc_ec; 753 struct ifnet * const ifp = &ec->ec_if; 754 755 ec->ec_mii = mii; 756 757 mii->mii_ifp = ifp; 758 mii->mii_readreg = pq3mdio_mii_readreg; 759 mii->mii_writereg = pq3mdio_mii_writereg; 760 mii->mii_statchg = pq3etsec_mii_statchg; 761 762 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 763 764 if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) { 765 mii_attach(sc->sc_mdio_dev, mii, 0xffffffff, 766 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 767 768 if (LIST_FIRST(&mii->mii_phys) == NULL) { 769 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 770 0, NULL); 771 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 772 } else { 773 callout_schedule(&sc->sc_mii_callout, hz); 774 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 775 } 776 } else { 777 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 778 0, NULL); 779 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX); 780 } 781 782 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 783 | ETHERCAP_JUMBO_MTU; 784 ec->ec_capenable = ETHERCAP_VLAN_HWTAGGING; 785 786 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 787 ifp->if_softc = sc; 788 ifp->if_capabilities = IFCAP_ETSEC; 789 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 790 ifp->if_ioctl = pq3etsec_ifioctl; 791 ifp->if_start = pq3etsec_ifstart; 792 ifp->if_watchdog = pq3etsec_ifwatchdog; 793 ifp->if_init = pq3etsec_ifinit; 794 ifp->if_stop = pq3etsec_ifstop; 795 IFQ_SET_READY(&ifp->if_snd); 796 797 /* 798 * Attach the interface. 799 */ 800 error = if_initialize(ifp); 801 if (error != 0) { 802 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n", 803 error); 804 goto fail_10; 805 } 806 pq3etsec_sysctl_setup(NULL, sc); 807 ether_ifattach(ifp, enaddr); 808 if_register(ifp); 809 810 pq3etsec_ifstop(ifp, true); 811 812 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC, 813 NULL, xname, "rx stall"); 814 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 815 NULL, xname, "tx stall"); 816 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR, 817 NULL, xname, "tx intr"); 818 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR, 819 NULL, xname, "rx intr"); 820 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR, 821 NULL, xname, "error intr"); 822 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 823 NULL, xname, "soft intr"); 824 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC, 825 NULL, xname, "tx pause"); 826 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC, 827 NULL, xname, "rx pause"); 828 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC, 829 NULL, xname, "mii ticks"); 830 return; 831 832 fail_10: 833 ifmedia_removeall(&mii->mii_media); 834 mii_detach(mii, sc->sc_phy_addr, MII_OFFSET_ANY); 835 fail_9: 836 softint_disestablish(sc->sc_soft_ih); 837 fail_8: 838 intr_disestablish(sc->sc_error_ih); 839 fail_7: 840 intr_disestablish(sc->sc_rx_ih); 841 fail_6: 842 intr_disestablish(sc->sc_tx_ih); 843 fail_5: 844 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 845 fail_4: 846 pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache); 847 fail_3: 848 #if 0 /* notyet */ 849 pq3etsec_txq_detach(sc); 850 #endif 851 fail_2: 852 #if 0 /* notyet */ 853 pq3etsec_rxq_detach(sc); 854 #endif 855 fail_1: 856 callout_destroy(&sc->sc_mii_callout); 857 mutex_obj_free(sc->sc_lock); 858 mutex_obj_free(sc->sc_hwlock); 859 bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size); 860 } 861 862 static uint64_t 863 pq3etsec_macaddr_create(const uint8_t *lladdr) 864 { 865 uint64_t macaddr = 0; 866 867 lladdr += ETHER_ADDR_LEN; 868 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) { 869 macaddr = (macaddr << 8) | *--lladdr; 870 } 871 return macaddr << 16; 872 } 873 874 static int 875 pq3etsec_ifinit(struct ifnet *ifp) 876 { 877 struct pq3etsec_softc * const sc = ifp->if_softc; 878 int error = 0; 879 880 sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES); 881 if (ifp->if_mtu > ETHERMTU_JUMBO) 882 return error; 883 884 KASSERT(ifp->if_flags & IFF_UP); 885 886 /* 887 * Stop the interface (steps 1 to 4 in the Soft Reset and 888 * Reconfigurating Procedure. 889 */ 890 pq3etsec_ifstop(ifp, 0); 891 892 /* 893 * If our frame size has changed (or it's our first time through) 894 * destroy the existing transmit mapcache. 895 */ 896 if (sc->sc_tx_mapcache != NULL 897 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 898 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 899 sc->sc_tx_mapcache = NULL; 900 } 901 902 if (sc->sc_tx_mapcache == NULL) { 903 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 904 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 905 if (error) 906 return error; 907 } 908 909 sc->sc_ev_mii_ticks.ev_count++; 910 mii_tick(&sc->sc_mii); 911 912 if (ifp->if_flags & IFF_PROMISC) { 913 sc->sc_rctrl |= RCTRL_PROM; 914 } else { 915 sc->sc_rctrl &= ~RCTRL_PROM; 916 } 917 918 uint32_t rctrl_prsdep = 0; 919 sc->sc_rctrl &= 920 ~(RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP); 921 if (VLAN_ATTACHED(&sc->sc_ec)) { 922 sc->sc_rctrl |= RCTRL_VLEX; 923 rctrl_prsdep = RCTRL_PRSDEP_L2; 924 } 925 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 926 sc->sc_rctrl |= RCTRL_IPCSEN; 927 rctrl_prsdep = RCTRL_PRSDEP_L3; 928 } 929 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 930 sc->sc_rctrl |= RCTRL_TUCSEN; 931 rctrl_prsdep = RCTRL_PRSDEP_L4; 932 } 933 sc->sc_rctrl |= rctrl_prsdep; 934 #if 0 935 if (sc->sc_rctrl 936 & (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP)) 937 aprint_normal_dev(sc->sc_dev, 938 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 939 sc->sc_rctrl, 940 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 941 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 942 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 943 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 944 #endif 945 946 sc->sc_tctrl &= ~(TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS); 947 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 948 sc->sc_tctrl |= TCTRL_VLINS; 949 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 950 sc->sc_tctrl |= TCTRL_IPCSEN; 951 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 952 sc->sc_tctrl |= TCTRL_TUCSEN; 953 #if 0 954 if (sc->sc_tctrl & (TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS)) 955 aprint_normal_dev(sc->sc_dev, 956 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 957 sc->sc_tctrl, 958 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 959 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 960 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 961 #endif 962 963 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN | MACCFG1_RX_EN); 964 965 const uint64_t macstnaddr = 966 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 967 968 sc->sc_imask = IEVENT_DPE; 969 970 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 971 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 972 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 973 974 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 975 pq3etsec_txq_reset(sc, &sc->sc_txq); 976 977 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 978 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 979 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 980 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 981 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 982 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 983 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 984 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 985 986 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 987 pq3etsec_mc_setup(sc); 988 989 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 990 etsec_write(sc, MRBLR, MCLBYTES); 991 992 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 993 sc->sc_dmactrl |= DMACTRL_DEFAULT; 994 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 995 996 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */ 997 etsec_write(sc, TQUEUE, TQUEUE_EN0); 998 sc->sc_imask |= IEVENT_TXF | IEVENT_TXE | IEVENT_TXC; 999 1000 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 1001 1002 /* 12. Enable receive queues in RQUEUE, */ 1003 etsec_write(sc, RQUEUE, RQUEUE_EN0 | RQUEUE_EX0); 1004 sc->sc_imask |= IEVENT_RXF | IEVENT_BSY | IEVENT_RXC; 1005 1006 /* and optionally set TOE functionality in RCTRL. */ 1007 etsec_write(sc, RCTRL, sc->sc_rctrl); 1008 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 1009 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 1010 sc->sc_rx_adjlen += sizeof(struct rxfcb); 1011 1012 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 1013 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 1014 1015 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 1016 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 1017 1018 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 1019 sc->sc_dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS); 1020 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1021 1022 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 1023 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 1024 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 1025 1026 sc->sc_soft_flags = 0; 1027 1028 etsec_write(sc, IMASK, sc->sc_imask); 1029 1030 ifp->if_flags |= IFF_RUNNING; 1031 1032 return error; 1033 } 1034 1035 static void 1036 pq3etsec_ifstop(struct ifnet *ifp, int disable) 1037 { 1038 struct pq3etsec_softc * const sc = ifp->if_softc; 1039 1040 KASSERT(!cpu_intr_p()); 1041 const uint32_t imask_gsc_mask = IEVENT_GTSC | IEVENT_GRSC; 1042 /* 1043 * Clear the GTSC and GRSC from the interrupt mask until 1044 * we are ready for them. Then clear them from IEVENT, 1045 * request the graceful shutdown, and then enable the 1046 * GTSC and GRSC bits in the mask. This should cause the 1047 * error interrupt to fire which will issue a wakeup to 1048 * allow us to resume. 1049 */ 1050 1051 /* 1052 * 1. Set GRS/GTS bits in DMACTRL register 1053 */ 1054 sc->sc_dmactrl |= DMACTRL_GRS | DMACTRL_GTS; 1055 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 1056 etsec_write(sc, IEVENT, imask_gsc_mask); 1057 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1058 1059 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN | MACCFG1_RX_EN)) { 1060 /* 1061 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 1062 */ 1063 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 1064 1065 u_int timo = 1000; 1066 uint32_t ievent = etsec_read(sc, IEVENT); 1067 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 1068 if (--timo == 0) { 1069 aprint_error_dev(sc->sc_dev, 1070 "WARNING: " 1071 "request to stop failed (IEVENT=%#x)\n", 1072 ievent); 1073 break; 1074 } 1075 delay(10); 1076 ievent = etsec_read(sc, IEVENT); 1077 } 1078 } 1079 1080 /* 1081 * Now reset the controller. 1082 * 1083 * 3. Set SOFT_RESET bit in MACCFG1 register 1084 * 4. Clear SOFT_RESET bit in MACCFG1 register 1085 */ 1086 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 1087 etsec_write(sc, MACCFG1, 0); 1088 etsec_write(sc, IMASK, 0); 1089 etsec_write(sc, IEVENT, ~0); 1090 sc->sc_imask = 0; 1091 ifp->if_flags &= ~IFF_RUNNING; 1092 1093 uint32_t tbipa = etsec_read(sc, TBIPA); 1094 if (tbipa == sc->sc_phy_addr) { 1095 aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 1096 etsec_write(sc, TBIPA, 0x1f); 1097 } 1098 uint32_t miimcfg = etsec_read(sc, MIIMCFG); 1099 etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 1100 etsec_write(sc, MIIMCFG, miimcfg); 1101 1102 /* 1103 * Let's consume any remaing transmitted packets. And if we are 1104 * disabling the interface, purge ourselves of any untransmitted 1105 * packets. But don't consume any received packets, just drop them. 1106 * If we aren't disabling the interface, save the mbufs in the 1107 * receive queue for reuse. 1108 */ 1109 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1110 pq3etsec_txq_consume(sc, &sc->sc_txq); 1111 if (disable) { 1112 pq3etsec_txq_purge(sc, &sc->sc_txq); 1113 IFQ_PURGE(&ifp->if_snd); 1114 } 1115 } 1116 1117 static void 1118 pq3etsec_ifwatchdog(struct ifnet *ifp) 1119 { 1120 } 1121 1122 static void 1123 pq3etsec_mc_setup( 1124 struct pq3etsec_softc *sc) 1125 { 1126 struct ethercom * const ec = &sc->sc_ec; 1127 struct ifnet * const ifp = &sc->sc_if; 1128 struct ether_multi *enm; 1129 struct ether_multistep step; 1130 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1131 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1132 1133 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1134 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1135 1136 ifp->if_flags &= ~IFF_ALLMULTI; 1137 1138 ETHER_LOCK(ec); 1139 ETHER_FIRST_MULTI(step, ec, enm); 1140 for (u_int i = 0; enm != NULL; ) { 1141 const char *addr = enm->enm_addrlo; 1142 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1143 ifp->if_flags |= IFF_ALLMULTI; 1144 memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1145 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1146 break; 1147 } 1148 if ((sc->sc_rctrl & RCTRL_EMEN) 1149 && i < __arraycount(sc->sc_macaddrs)) { 1150 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1151 } else { 1152 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1153 #if 0 1154 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1155 ether_sprintf(addr), crc, 1156 crc >> crc_shift, 1157 crc >> (crc_shift + 5), 1158 (crc >> crc_shift) & 31, 1159 1 << (((crc >> crc_shift) & 31) ^ 31)); 1160 #endif 1161 /* 1162 * The documentation doesn't completely follow PowerPC 1163 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1164 * is 0x7fa32d9b. By empirical testing, the 1165 * corresponding hash bit is word 3, bit 31 (ppc bit 1166 * order). Since 3 << 31 | 31 is 0x7f, we deduce 1167 * H[0:2] selects the register while H[3:7] selects 1168 * the bit (ppc bit order). 1169 */ 1170 crc >>= crc_shift; 1171 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 1172 } 1173 ETHER_NEXT_MULTI(step, enm); 1174 } 1175 ETHER_UNLOCK(ec); 1176 for (u_int i = 0; i < 8; i++) { 1177 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 1178 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 1179 #if 0 1180 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 1181 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 1182 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 1183 i, GADDR(i), etsec_read(sc, GADDR(i))); 1184 #endif 1185 } 1186 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 1187 uint64_t macaddr = sc->sc_macaddrs[i]; 1188 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1189 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1190 #if 0 1191 if (macaddr) 1192 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1193 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1194 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1195 #endif 1196 } 1197 } 1198 1199 static int 1200 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1201 { 1202 struct pq3etsec_softc *sc = ifp->if_softc; 1203 struct ifreq * const ifr = data; 1204 const int s = splnet(); 1205 int error; 1206 1207 switch (cmd) { 1208 case SIOCSIFMEDIA: 1209 /* Flow control requires full-duplex mode. */ 1210 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1211 (ifr->ifr_media & IFM_FDX) == 0) 1212 ifr->ifr_media &= ~IFM_ETH_FMASK; 1213 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1214 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1215 /* We can do both TXPAUSE and RXPAUSE. */ 1216 ifr->ifr_media |= 1217 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1218 } 1219 } 1220 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1221 break; 1222 1223 default: 1224 error = ether_ioctl(ifp, cmd, data); 1225 if (error != ENETRESET) 1226 break; 1227 1228 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1229 error = 0; 1230 if (ifp->if_flags & IFF_RUNNING) 1231 pq3etsec_mc_setup(sc); 1232 break; 1233 } 1234 error = pq3etsec_ifinit(ifp); 1235 break; 1236 } 1237 1238 splx(s); 1239 return error; 1240 } 1241 1242 static void 1243 pq3etsec_rxq_desc_presync( 1244 struct pq3etsec_softc *sc, 1245 struct pq3etsec_rxqueue *rxq, 1246 volatile struct rxbd *rxbd, 1247 size_t count) 1248 { 1249 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1250 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1251 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1252 } 1253 1254 static void 1255 pq3etsec_rxq_desc_postsync( 1256 struct pq3etsec_softc *sc, 1257 struct pq3etsec_rxqueue *rxq, 1258 volatile struct rxbd *rxbd, 1259 size_t count) 1260 { 1261 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1262 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1263 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1264 } 1265 1266 static void 1267 pq3etsec_txq_desc_presync( 1268 struct pq3etsec_softc *sc, 1269 struct pq3etsec_txqueue *txq, 1270 volatile struct txbd *txbd, 1271 size_t count) 1272 { 1273 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1274 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1275 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1276 } 1277 1278 static void 1279 pq3etsec_txq_desc_postsync( 1280 struct pq3etsec_softc *sc, 1281 struct pq3etsec_txqueue *txq, 1282 volatile struct txbd *txbd, 1283 size_t count) 1284 { 1285 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1286 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1287 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1288 } 1289 1290 static bus_dmamap_t 1291 pq3etsec_mapcache_get( 1292 struct pq3etsec_softc *sc, 1293 struct pq3etsec_mapcache *dmc) 1294 { 1295 KASSERT(dmc->dmc_nmaps > 0); 1296 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 1297 return dmc->dmc_maps[--dmc->dmc_nmaps]; 1298 } 1299 1300 static void 1301 pq3etsec_mapcache_put( 1302 struct pq3etsec_softc *sc, 1303 struct pq3etsec_mapcache *dmc, 1304 bus_dmamap_t map) 1305 { 1306 KASSERT(map != NULL); 1307 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 1308 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 1309 } 1310 1311 static void 1312 pq3etsec_mapcache_destroy( 1313 struct pq3etsec_softc *sc, 1314 struct pq3etsec_mapcache *dmc) 1315 { 1316 const size_t dmc_size = 1317 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]); 1318 1319 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 1320 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 1321 } 1322 kmem_intr_free(dmc, dmc_size); 1323 } 1324 1325 static int 1326 pq3etsec_mapcache_create( 1327 struct pq3etsec_softc *sc, 1328 struct pq3etsec_mapcache **dmc_p, 1329 size_t maxmaps, 1330 size_t maxmapsize, 1331 size_t maxseg) 1332 { 1333 const size_t dmc_size = 1334 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]); 1335 struct pq3etsec_mapcache * const dmc = 1336 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 1337 1338 dmc->dmc_maxmaps = maxmaps; 1339 dmc->dmc_nmaps = maxmaps; 1340 dmc->dmc_maxmapsize = maxmapsize; 1341 dmc->dmc_maxseg = maxseg; 1342 1343 for (u_int i = 0; i < maxmaps; i++) { 1344 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 1345 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 1346 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 1347 if (error) { 1348 aprint_error_dev(sc->sc_dev, 1349 "failed to creat dma map cache " 1350 "entry %u of %zu: %d\n", 1351 i, maxmaps, error); 1352 while (i-- > 0) { 1353 bus_dmamap_destroy(sc->sc_dmat, 1354 dmc->dmc_maps[i]); 1355 } 1356 kmem_intr_free(dmc, dmc_size); 1357 return error; 1358 } 1359 KASSERT(dmc->dmc_maps[i] != NULL); 1360 } 1361 1362 *dmc_p = dmc; 1363 1364 return 0; 1365 } 1366 1367 #if 0 1368 static void 1369 pq3etsec_dmamem_free( 1370 bus_dma_tag_t dmat, 1371 size_t map_size, 1372 bus_dma_segment_t *seg, 1373 bus_dmamap_t map, 1374 void *kvap) 1375 { 1376 bus_dmamap_destroy(dmat, map); 1377 bus_dmamem_unmap(dmat, kvap, map_size); 1378 bus_dmamem_free(dmat, seg, 1); 1379 } 1380 #endif 1381 1382 static int 1383 pq3etsec_dmamem_alloc( 1384 bus_dma_tag_t dmat, 1385 size_t map_size, 1386 bus_dma_segment_t *seg, 1387 bus_dmamap_t *map, 1388 void **kvap) 1389 { 1390 int error; 1391 int nseg; 1392 1393 *kvap = NULL; 1394 *map = NULL; 1395 1396 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0, 1397 seg, 1, &nseg, 0); 1398 if (error) 1399 return error; 1400 1401 KASSERT(nseg == 1); 1402 1403 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 1404 BUS_DMA_COHERENT); 1405 if (error == 0) { 1406 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 1407 map); 1408 if (error == 0) { 1409 error = bus_dmamap_load(dmat, *map, *kvap, map_size, 1410 NULL, 0); 1411 if (error == 0) 1412 return 0; 1413 bus_dmamap_destroy(dmat, *map); 1414 *map = NULL; 1415 } 1416 bus_dmamem_unmap(dmat, *kvap, map_size); 1417 *kvap = NULL; 1418 } 1419 bus_dmamem_free(dmat, seg, nseg); 1420 return 0; 1421 } 1422 1423 static struct mbuf * 1424 pq3etsec_rx_buf_alloc( 1425 struct pq3etsec_softc *sc) 1426 { 1427 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA); 1428 if (m == NULL) { 1429 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr"); 1430 return NULL; 1431 } 1432 MCLGET(m, M_DONTWAIT); 1433 if ((m->m_flags & M_EXT) == 0) { 1434 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 1435 m_freem(m); 1436 return NULL; 1437 } 1438 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1439 1440 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache); 1441 if (map == NULL) { 1442 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 1443 m_freem(m); 1444 return NULL; 1445 } 1446 M_SETCTX(m, map); 1447 m->m_len = m->m_pkthdr.len = MCLBYTES; 1448 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1449 BUS_DMA_READ | BUS_DMA_NOWAIT); 1450 if (error) { 1451 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n", 1452 error); 1453 M_SETCTX(m, NULL); 1454 m_freem(m); 1455 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1456 return NULL; 1457 } 1458 KASSERT(map->dm_mapsize == MCLBYTES); 1459 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1460 BUS_DMASYNC_PREREAD); 1461 1462 return m; 1463 } 1464 1465 static void 1466 pq3etsec_rx_map_unload( 1467 struct pq3etsec_softc *sc, 1468 struct mbuf *m) 1469 { 1470 KASSERT(m); 1471 for (; m != NULL; m = m->m_next) { 1472 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1473 KASSERT(map); 1474 KASSERT(map->dm_mapsize == MCLBYTES); 1475 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 1476 BUS_DMASYNC_POSTREAD); 1477 bus_dmamap_unload(sc->sc_dmat, map); 1478 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1479 M_SETCTX(m, NULL); 1480 } 1481 } 1482 1483 static bool 1484 pq3etsec_rxq_produce( 1485 struct pq3etsec_softc *sc, 1486 struct pq3etsec_rxqueue *rxq) 1487 { 1488 volatile struct rxbd *producer = rxq->rxq_producer; 1489 #if 0 1490 size_t inuse = rxq->rxq_inuse; 1491 #endif 1492 while (rxq->rxq_inuse < rxq->rxq_threshold) { 1493 struct mbuf *m; 1494 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 1495 if (m == NULL) { 1496 m = pq3etsec_rx_buf_alloc(sc); 1497 if (m == NULL) { 1498 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__); 1499 break; 1500 } 1501 } 1502 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1503 KASSERT(map); 1504 1505 #ifdef ETSEC_DEBUG 1506 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL); 1507 rxq->rxq_mbufs[producer-rxq->rxq_first] = m; 1508 #endif 1509 1510 /* rxbd_len is write-only by the ETSEC */ 1511 producer->rxbd_bufptr = map->dm_segs[0].ds_addr; 1512 membar_producer(); 1513 producer->rxbd_flags |= RXBD_E; 1514 if (__predict_false(rxq->rxq_mhead == NULL)) { 1515 KASSERT(producer == rxq->rxq_consumer); 1516 rxq->rxq_mconsumer = m; 1517 } 1518 *rxq->rxq_mtail = m; 1519 rxq->rxq_mtail = &m->m_next; 1520 m->m_len = MCLBYTES; 1521 m->m_next = NULL; 1522 rxq->rxq_inuse++; 1523 if (++producer == rxq->rxq_last) { 1524 membar_producer(); 1525 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1526 rxq->rxq_last - rxq->rxq_producer); 1527 producer = rxq->rxq_producer = rxq->rxq_first; 1528 } 1529 } 1530 if (producer != rxq->rxq_producer) { 1531 membar_producer(); 1532 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1533 producer - rxq->rxq_producer); 1534 rxq->rxq_producer = producer; 1535 } 1536 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1537 if (qhlt) { 1538 KASSERT(qhlt & rxq->rxq_qmask); 1539 sc->sc_ev_rx_stall.ev_count++; 1540 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1541 } 1542 #if 0 1543 aprint_normal_dev(sc->sc_dev, 1544 "%s: buffers inuse went from %zu to %zu\n", 1545 __func__, inuse, rxq->rxq_inuse); 1546 #endif 1547 return true; 1548 } 1549 1550 static bool 1551 pq3etsec_rx_offload( 1552 struct pq3etsec_softc *sc, 1553 struct mbuf *m, 1554 const struct rxfcb *fcb) 1555 { 1556 if (fcb->rxfcb_flags & RXFCB_VLN) { 1557 vlan_set_tag(m, fcb->rxfcb_vlctl); 1558 } 1559 if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1560 || (fcb->rxfcb_flags & (RXFCB_CIP | RXFCB_CTU)) == 0) 1561 return true; 1562 int csum_flags = 0; 1563 if ((fcb->rxfcb_flags & (RXFCB_IP6 | RXFCB_CIP)) == RXFCB_CIP) { 1564 csum_flags |= M_CSUM_IPv4; 1565 if (fcb->rxfcb_flags & RXFCB_EIP) 1566 csum_flags |= M_CSUM_IPv4_BAD; 1567 } 1568 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1569 int ipv_flags; 1570 if (fcb->rxfcb_flags & RXFCB_IP6) 1571 ipv_flags = M_CSUM_TCPv6 | M_CSUM_UDPv6; 1572 else 1573 ipv_flags = M_CSUM_TCPv4 | M_CSUM_UDPv4; 1574 if (fcb->rxfcb_pro == IPPROTO_TCP) { 1575 csum_flags |= (M_CSUM_TCPv4 |M_CSUM_TCPv6) & ipv_flags; 1576 } else { 1577 csum_flags |= (M_CSUM_UDPv4 |M_CSUM_UDPv6) & ipv_flags; 1578 } 1579 if (fcb->rxfcb_flags & RXFCB_ETU) 1580 csum_flags |= M_CSUM_TCP_UDP_BAD; 1581 } 1582 1583 m->m_pkthdr.csum_flags = csum_flags; 1584 return true; 1585 } 1586 1587 static void 1588 pq3etsec_rx_input( 1589 struct pq3etsec_softc *sc, 1590 struct mbuf *m, 1591 uint16_t rxbd_flags) 1592 { 1593 struct ifnet * const ifp = &sc->sc_if; 1594 1595 pq3etsec_rx_map_unload(sc, m); 1596 1597 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1598 struct rxfcb fcb = *mtod(m, struct rxfcb *); 1599 if (!pq3etsec_rx_offload(sc, m, &fcb)) 1600 return; 1601 } 1602 m_adj(m, sc->sc_rx_adjlen); 1603 1604 if (rxbd_flags & RXBD_M) 1605 m->m_flags |= M_PROMISC; 1606 if (rxbd_flags & RXBD_BC) 1607 m->m_flags |= M_BCAST; 1608 if (rxbd_flags & RXBD_MC) 1609 m->m_flags |= M_MCAST; 1610 m->m_flags |= M_HASFCS; 1611 m_set_rcvif(m, &sc->sc_if); 1612 1613 /* 1614 * Let's give it to the network subsystm to deal with. 1615 */ 1616 int s = splnet(); 1617 if_input(ifp, m); 1618 splx(s); 1619 } 1620 1621 static void 1622 pq3etsec_rxq_consume( 1623 struct pq3etsec_softc *sc, 1624 struct pq3etsec_rxqueue *rxq) 1625 { 1626 struct ifnet * const ifp = &sc->sc_if; 1627 volatile struct rxbd *consumer = rxq->rxq_consumer; 1628 size_t rxconsumed = 0; 1629 1630 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1631 1632 for (;;) { 1633 if (consumer == rxq->rxq_producer) { 1634 rxq->rxq_consumer = consumer; 1635 rxq->rxq_inuse -= rxconsumed; 1636 KASSERT(rxq->rxq_inuse == 0); 1637 return; 1638 } 1639 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1640 const uint16_t rxbd_flags = consumer->rxbd_flags; 1641 if (rxbd_flags & RXBD_E) { 1642 rxq->rxq_consumer = consumer; 1643 rxq->rxq_inuse -= rxconsumed; 1644 return; 1645 } 1646 KASSERT(rxq->rxq_mconsumer != NULL); 1647 #ifdef ETSEC_DEBUG 1648 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1649 #endif 1650 #if 0 1651 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1652 __func__, 1653 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1654 mtod(rxq->rxq_mconsumer, int *)[0], 1655 mtod(rxq->rxq_mconsumer, int *)[1], 1656 mtod(rxq->rxq_mconsumer, int *)[2], 1657 mtod(rxq->rxq_mconsumer, int *)[3]); 1658 #endif 1659 /* 1660 * We own this packet again. Clear all flags except wrap. 1661 */ 1662 rxconsumed++; 1663 consumer->rxbd_flags = rxbd_flags & (RXBD_W | RXBD_I); 1664 1665 /* 1666 * If this descriptor has the LAST bit set and no errors, 1667 * it's a valid input packet. 1668 */ 1669 if ((rxbd_flags & (RXBD_L | RXBD_ERRORS)) == RXBD_L) { 1670 size_t rxbd_len = consumer->rxbd_len; 1671 struct mbuf *m = rxq->rxq_mhead; 1672 struct mbuf *m_last = rxq->rxq_mconsumer; 1673 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1674 rxq->rxq_mtail = &rxq->rxq_mhead; 1675 rxq->rxq_mconsumer = rxq->rxq_mhead; 1676 m_last->m_next = NULL; 1677 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1678 m->m_pkthdr.len = rxbd_len; 1679 pq3etsec_rx_input(sc, m, rxbd_flags); 1680 } else if (rxbd_flags & RXBD_L) { 1681 KASSERT(rxbd_flags & RXBD_ERRORS); 1682 struct mbuf *m; 1683 /* 1684 * We encountered an error, take the mbufs and add 1685 * then to the rx bufcache so we can reuse them. 1686 */ 1687 if_statinc(ifp, if_ierrors); 1688 for (m = rxq->rxq_mhead; 1689 m != rxq->rxq_mconsumer; 1690 m = m->m_next) { 1691 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1692 } 1693 m = rxq->rxq_mconsumer; 1694 if ((rxq->rxq_mhead = m->m_next) == NULL) 1695 rxq->rxq_mtail = &rxq->rxq_mhead; 1696 rxq->rxq_mconsumer = m->m_next; 1697 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1698 } else { 1699 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1700 } 1701 #ifdef ETSEC_DEBUG 1702 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1703 #endif 1704 1705 /* 1706 * Wrap at the last entry! 1707 */ 1708 if (rxbd_flags & RXBD_W) { 1709 KASSERT(consumer + 1 == rxq->rxq_last); 1710 consumer = rxq->rxq_first; 1711 } else { 1712 consumer++; 1713 } 1714 #ifdef ETSEC_DEBUG 1715 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1716 #endif 1717 } 1718 } 1719 1720 static void 1721 pq3etsec_rxq_purge( 1722 struct pq3etsec_softc *sc, 1723 struct pq3etsec_rxqueue *rxq, 1724 bool discard) 1725 { 1726 struct mbuf *m; 1727 1728 if ((m = rxq->rxq_mhead) != NULL) { 1729 #ifdef ETSEC_DEBUG 1730 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1731 #endif 1732 1733 if (discard) { 1734 pq3etsec_rx_map_unload(sc, m); 1735 m_freem(m); 1736 } else { 1737 while (m != NULL) { 1738 struct mbuf *m0 = m->m_next; 1739 m->m_next = NULL; 1740 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1741 m = m0; 1742 } 1743 } 1744 } 1745 1746 rxq->rxq_mconsumer = NULL; 1747 rxq->rxq_mhead = NULL; 1748 rxq->rxq_mtail = &rxq->rxq_mhead; 1749 rxq->rxq_inuse = 0; 1750 } 1751 1752 static void 1753 pq3etsec_rxq_reset( 1754 struct pq3etsec_softc *sc, 1755 struct pq3etsec_rxqueue *rxq) 1756 { 1757 /* 1758 * sync all the descriptors 1759 */ 1760 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1761 rxq->rxq_last - rxq->rxq_first); 1762 1763 /* 1764 * Make sure we own all descriptors in the ring. 1765 */ 1766 volatile struct rxbd *rxbd; 1767 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1768 rxbd->rxbd_flags = RXBD_I; 1769 } 1770 1771 /* 1772 * Last descriptor has the wrap flag. 1773 */ 1774 rxbd->rxbd_flags = RXBD_W | RXBD_I; 1775 1776 /* 1777 * Reset the producer consumer indexes. 1778 */ 1779 rxq->rxq_consumer = rxq->rxq_first; 1780 rxq->rxq_producer = rxq->rxq_first; 1781 rxq->rxq_inuse = 0; 1782 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1783 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1784 1785 sc->sc_imask |= IEVENT_RXF | IEVENT_BSY; 1786 1787 /* 1788 * Restart the transmit at the first descriptor 1789 */ 1790 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1791 } 1792 1793 static int 1794 pq3etsec_rxq_attach( 1795 struct pq3etsec_softc *sc, 1796 struct pq3etsec_rxqueue *rxq, 1797 u_int qno) 1798 { 1799 size_t map_size = PAGE_SIZE; 1800 size_t desc_count = map_size / sizeof(struct rxbd); 1801 int error; 1802 void *descs; 1803 1804 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1805 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1806 if (error) 1807 return error; 1808 1809 memset(descs, 0, map_size); 1810 rxq->rxq_first = descs; 1811 rxq->rxq_last = rxq->rxq_first + desc_count; 1812 rxq->rxq_consumer = descs; 1813 rxq->rxq_producer = descs; 1814 1815 pq3etsec_rxq_purge(sc, rxq, true); 1816 pq3etsec_rxq_reset(sc, rxq); 1817 1818 rxq->rxq_reg_rbase = RBASEn(qno); 1819 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1820 1821 return 0; 1822 } 1823 1824 static bool 1825 pq3etsec_txq_active_p( 1826 struct pq3etsec_softc * const sc, 1827 struct pq3etsec_txqueue *txq) 1828 { 1829 return !IF_IS_EMPTY(&txq->txq_mbufs); 1830 } 1831 1832 static bool 1833 pq3etsec_txq_fillable_p( 1834 struct pq3etsec_softc * const sc, 1835 struct pq3etsec_txqueue *txq) 1836 { 1837 return txq->txq_free >= txq->txq_threshold; 1838 } 1839 1840 static int 1841 pq3etsec_txq_attach( 1842 struct pq3etsec_softc *sc, 1843 struct pq3etsec_txqueue *txq, 1844 u_int qno) 1845 { 1846 size_t map_size = PAGE_SIZE; 1847 size_t desc_count = map_size / sizeof(struct txbd); 1848 int error; 1849 void *descs; 1850 1851 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1852 &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1853 if (error) 1854 return error; 1855 1856 memset(descs, 0, map_size); 1857 txq->txq_first = descs; 1858 txq->txq_last = txq->txq_first + desc_count; 1859 txq->txq_consumer = descs; 1860 txq->txq_producer = descs; 1861 1862 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1863 1864 txq->txq_reg_tbase = TBASEn(qno); 1865 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1866 1867 pq3etsec_txq_reset(sc, txq); 1868 1869 return 0; 1870 } 1871 1872 static int 1873 pq3etsec_txq_map_load( 1874 struct pq3etsec_softc *sc, 1875 struct pq3etsec_txqueue *txq, 1876 struct mbuf *m) 1877 { 1878 bus_dmamap_t map; 1879 int error; 1880 1881 map = M_GETCTX(m, bus_dmamap_t); 1882 if (map != NULL) 1883 return 0; 1884 1885 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1886 if (map == NULL) 1887 return ENOMEM; 1888 1889 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1890 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1891 if (error) 1892 return error; 1893 1894 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1895 BUS_DMASYNC_PREWRITE); 1896 M_SETCTX(m, map); 1897 return 0; 1898 } 1899 1900 static void 1901 pq3etsec_txq_map_unload( 1902 struct pq3etsec_softc *sc, 1903 struct pq3etsec_txqueue *txq, 1904 struct mbuf *m) 1905 { 1906 KASSERT(m); 1907 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1908 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1909 BUS_DMASYNC_POSTWRITE); 1910 bus_dmamap_unload(sc->sc_dmat, map); 1911 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1912 } 1913 1914 static bool 1915 pq3etsec_txq_produce( 1916 struct pq3etsec_softc *sc, 1917 struct pq3etsec_txqueue *txq, 1918 struct mbuf *m) 1919 { 1920 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1921 1922 if (map->dm_nsegs > txq->txq_free) 1923 return false; 1924 1925 /* 1926 * TCP Offload flag must be set in the first descriptor. 1927 */ 1928 volatile struct txbd *producer = txq->txq_producer; 1929 uint16_t last_flags = TXBD_L; 1930 uint16_t first_flags = TXBD_R 1931 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1932 1933 /* 1934 * If we've produced enough descriptors without consuming any 1935 * we need to ask for an interrupt to reclaim some. 1936 */ 1937 txq->txq_lastintr += map->dm_nsegs; 1938 if (ETSEC_IC_TX_ENABLED(sc) 1939 || txq->txq_lastintr >= txq->txq_threshold 1940 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1941 txq->txq_lastintr = 0; 1942 last_flags |= TXBD_I; 1943 } 1944 1945 #ifdef ETSEC_DEBUG 1946 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1947 #endif 1948 KASSERT(producer != txq->txq_last); 1949 producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1950 producer->txbd_len = map->dm_segs[0].ds_len; 1951 1952 if (map->dm_nsegs > 1) { 1953 volatile struct txbd *start = producer + 1; 1954 size_t count = map->dm_nsegs - 1; 1955 for (u_int i = 1; i < map->dm_nsegs; i++) { 1956 if (__predict_false(++producer == txq->txq_last)) { 1957 producer = txq->txq_first; 1958 if (start < txq->txq_last) { 1959 pq3etsec_txq_desc_presync(sc, txq, 1960 start, txq->txq_last - start); 1961 count -= txq->txq_last - start; 1962 } 1963 start = txq->txq_first; 1964 } 1965 #ifdef ETSEC_DEBUG 1966 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1967 #endif 1968 producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1969 producer->txbd_len = map->dm_segs[i].ds_len; 1970 producer->txbd_flags = TXBD_R 1971 | (producer->txbd_flags & TXBD_W) 1972 | (i == map->dm_nsegs - 1 ? last_flags : 0); 1973 #if 0 1974 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1975 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1976 #endif 1977 } 1978 pq3etsec_txq_desc_presync(sc, txq, start, count); 1979 } else { 1980 first_flags |= last_flags; 1981 } 1982 1983 membar_producer(); 1984 txq->txq_producer->txbd_flags = 1985 first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1986 #if 0 1987 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1988 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1989 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1990 #endif 1991 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1992 1993 /* 1994 * Reduce free count by the number of segments we consumed. 1995 */ 1996 txq->txq_free -= map->dm_nsegs; 1997 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1998 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0); 1999 KASSERT(producer->txbd_flags & TXBD_L); 2000 #ifdef ETSEC_DEBUG 2001 txq->txq_lmbufs[producer - txq->txq_first] = m; 2002 #endif 2003 2004 #if 0 2005 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n", 2006 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 2007 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 2008 #endif 2009 2010 if (++producer == txq->txq_last) 2011 txq->txq_producer = txq->txq_first; 2012 else 2013 txq->txq_producer = producer; 2014 IF_ENQUEUE(&txq->txq_mbufs, m); 2015 2016 /* 2017 * Restart the transmitter. 2018 */ 2019 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */ 2020 2021 return true; 2022 } 2023 2024 static void 2025 pq3etsec_tx_offload( 2026 struct pq3etsec_softc *sc, 2027 struct pq3etsec_txqueue *txq, 2028 struct mbuf **mp) 2029 { 2030 struct mbuf *m = *mp; 2031 u_int csum_flags = m->m_pkthdr.csum_flags; 2032 bool have_vtag; 2033 uint16_t vtag; 2034 2035 KASSERT(m->m_flags & M_PKTHDR); 2036 2037 have_vtag = vlan_has_tag(m); 2038 vtag = (have_vtag) ? vlan_get_tag(m) : 0; 2039 2040 /* 2041 * Let see if we are doing any offload first. 2042 */ 2043 if (csum_flags == 0 && !have_vtag) { 2044 m->m_flags &= ~M_HASFCB; 2045 return; 2046 } 2047 2048 uint16_t flags = 0; 2049 if (csum_flags & M_CSUM_IP) { 2050 flags |= TXFCB_IP 2051 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0) 2052 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0) 2053 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0) 2054 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0) 2055 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0); 2056 } 2057 if (have_vtag) { 2058 flags |= TXFCB_VLN; 2059 } 2060 if (flags == 0) { 2061 m->m_flags &= ~M_HASFCB; 2062 return; 2063 } 2064 2065 struct txfcb fcb; 2066 fcb.txfcb_flags = flags; 2067 if (csum_flags & M_CSUM_IPv4) 2068 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2069 else 2070 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2071 fcb.txfcb_l3os = ETHER_HDR_LEN; 2072 fcb.txfcb_phcs = 0; 2073 fcb.txfcb_vlctl = vtag; 2074 2075 #if 0 2076 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n", 2077 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 2078 fcb.txfcb_phcs, fcb.txfcb_vlctl); 2079 #endif 2080 2081 if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 2082 m->m_data -= sizeof(fcb); 2083 m->m_len += sizeof(fcb); 2084 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 2085 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 2086 m->m_data = m->m_pktdat; 2087 m->m_len += sizeof(fcb); 2088 } else { 2089 struct mbuf *mn; 2090 MGET(mn, M_DONTWAIT, m->m_type); 2091 if (mn == NULL) { 2092 if (csum_flags & M_CSUM_IP4) { 2093 #ifdef INET 2094 in_undefer_cksum(m, ETHER_HDR_LEN, 2095 csum_flags & M_CSUM_IP4); 2096 #else 2097 panic("%s: impossible M_CSUM flags %#x", 2098 device_xname(sc->sc_dev), csum_flags); 2099 #endif 2100 } else if (csum_flags & M_CSUM_IP6) { 2101 #ifdef INET6 2102 in6_undefer_cksum(m, ETHER_HDR_LEN, 2103 csum_flags & M_CSUM_IP6); 2104 #else 2105 panic("%s: impossible M_CSUM flags %#x", 2106 device_xname(sc->sc_dev), csum_flags); 2107 #endif 2108 } 2109 2110 m->m_flags &= ~M_HASFCB; 2111 return; 2112 } 2113 2114 m_move_pkthdr(mn, m); 2115 mn->m_next = m; 2116 m = mn; 2117 m_align(m, sizeof(fcb)); 2118 m->m_len = sizeof(fcb); 2119 *mp = m; 2120 } 2121 m->m_pkthdr.len += sizeof(fcb); 2122 m->m_flags |= M_HASFCB; 2123 *mtod(m, struct txfcb *) = fcb; 2124 return; 2125 } 2126 2127 static bool 2128 pq3etsec_txq_enqueue( 2129 struct pq3etsec_softc *sc, 2130 struct pq3etsec_txqueue *txq) 2131 { 2132 for (;;) { 2133 if (IF_QFULL(&txq->txq_mbufs)) 2134 return false; 2135 struct mbuf *m = txq->txq_next; 2136 if (m == NULL) { 2137 int s = splnet(); 2138 IFQ_DEQUEUE(&sc->sc_if.if_snd, m); 2139 splx(s); 2140 if (m == NULL) 2141 return true; 2142 M_SETCTX(m, NULL); 2143 pq3etsec_tx_offload(sc, txq, &m); 2144 } else { 2145 txq->txq_next = NULL; 2146 } 2147 int error = pq3etsec_txq_map_load(sc, txq, m); 2148 if (error) { 2149 aprint_error_dev(sc->sc_dev, 2150 "discarded packet due to " 2151 "dmamap load failure: %d\n", error); 2152 m_freem(m); 2153 continue; 2154 } 2155 KASSERT(txq->txq_next == NULL); 2156 if (!pq3etsec_txq_produce(sc, txq, m)) { 2157 txq->txq_next = m; 2158 return false; 2159 } 2160 KASSERT(txq->txq_next == NULL); 2161 } 2162 } 2163 2164 static bool 2165 pq3etsec_txq_consume( 2166 struct pq3etsec_softc *sc, 2167 struct pq3etsec_txqueue *txq) 2168 { 2169 struct ifnet * const ifp = &sc->sc_if; 2170 volatile struct txbd *consumer = txq->txq_consumer; 2171 size_t txfree = 0; 2172 2173 #if 0 2174 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 2175 #endif 2176 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 2177 2178 for (;;) { 2179 if (consumer == txq->txq_producer) { 2180 txq->txq_consumer = consumer; 2181 txq->txq_free += txfree; 2182 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 2183 #if 0 2184 printf("%s: empty: freed %zu descriptors going form %zu to %zu\n", 2185 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 2186 #endif 2187 KASSERT(txq->txq_lastintr == 0); 2188 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 2189 return true; 2190 } 2191 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2192 const uint16_t txbd_flags = consumer->txbd_flags; 2193 if (txbd_flags & TXBD_R) { 2194 txq->txq_consumer = consumer; 2195 txq->txq_free += txfree; 2196 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 2197 #if 0 2198 printf("%s: freed %zu descriptors\n", 2199 __func__, txfree); 2200 #endif 2201 return pq3etsec_txq_fillable_p(sc, txq); 2202 } 2203 2204 /* 2205 * If this is the last descriptor in the chain, get the 2206 * mbuf, free its dmamap, and free the mbuf chain itself. 2207 */ 2208 if (txbd_flags & TXBD_L) { 2209 struct mbuf *m; 2210 2211 IF_DEQUEUE(&txq->txq_mbufs, m); 2212 #ifdef ETSEC_DEBUG 2213 KASSERTMSG( 2214 m == txq->txq_lmbufs[consumer-txq->txq_first], 2215 "%s: %p [%u]: flags %#x m (%p) != %p (%p)", 2216 __func__, consumer, consumer - txq->txq_first, 2217 txbd_flags, m, 2218 &txq->txq_lmbufs[consumer-txq->txq_first], 2219 txq->txq_lmbufs[consumer-txq->txq_first]); 2220 #endif 2221 KASSERT(m); 2222 pq3etsec_txq_map_unload(sc, txq, m); 2223 #if 0 2224 printf("%s: mbuf %p: consumed a %u byte packet\n", 2225 __func__, m, m->m_pkthdr.len); 2226 #endif 2227 if (m->m_flags & M_HASFCB) 2228 m_adj(m, sizeof(struct txfcb)); 2229 bpf_mtap(ifp, m, BPF_D_OUT); 2230 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2231 if_statinc_ref(nsr, if_opackets); 2232 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 2233 if (m->m_flags & M_MCAST) 2234 if_statinc_ref(nsr, if_omcasts); 2235 if (txbd_flags & TXBD_ERRORS) 2236 if_statinc_ref(nsr, if_oerrors); 2237 IF_STAT_PUTREF(ifp); 2238 m_freem(m); 2239 #ifdef ETSEC_DEBUG 2240 txq->txq_lmbufs[consumer - txq->txq_first] = NULL; 2241 #endif 2242 } else { 2243 #ifdef ETSEC_DEBUG 2244 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL); 2245 #endif 2246 } 2247 2248 /* 2249 * We own this packet again. Clear all flags except wrap. 2250 */ 2251 txfree++; 2252 //consumer->txbd_flags = txbd_flags & TXBD_W; 2253 2254 /* 2255 * Wrap at the last entry! 2256 */ 2257 if (txbd_flags & TXBD_W) { 2258 KASSERT(consumer + 1 == txq->txq_last); 2259 consumer = txq->txq_first; 2260 } else { 2261 consumer++; 2262 KASSERT(consumer < txq->txq_last); 2263 } 2264 } 2265 } 2266 2267 static void 2268 pq3etsec_txq_purge( 2269 struct pq3etsec_softc *sc, 2270 struct pq3etsec_txqueue *txq) 2271 { 2272 struct mbuf *m; 2273 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0); 2274 2275 for (;;) { 2276 IF_DEQUEUE(&txq->txq_mbufs, m); 2277 if (m == NULL) 2278 break; 2279 pq3etsec_txq_map_unload(sc, txq, m); 2280 m_freem(m); 2281 } 2282 if ((m = txq->txq_next) != NULL) { 2283 txq->txq_next = NULL; 2284 pq3etsec_txq_map_unload(sc, txq, m); 2285 m_freem(m); 2286 } 2287 #ifdef ETSEC_DEBUG 2288 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs)); 2289 #endif 2290 } 2291 2292 static void 2293 pq3etsec_txq_reset( 2294 struct pq3etsec_softc *sc, 2295 struct pq3etsec_txqueue *txq) 2296 { 2297 /* 2298 * sync all the descriptors 2299 */ 2300 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first, 2301 txq->txq_last - txq->txq_first); 2302 2303 /* 2304 * Make sure we own all descriptors in the ring. 2305 */ 2306 volatile struct txbd *txbd; 2307 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) { 2308 txbd->txbd_flags = 0; 2309 } 2310 2311 /* 2312 * Last descriptor has the wrap flag. 2313 */ 2314 txbd->txbd_flags = TXBD_W; 2315 2316 /* 2317 * Reset the producer consumer indexes. 2318 */ 2319 txq->txq_consumer = txq->txq_first; 2320 txq->txq_producer = txq->txq_first; 2321 txq->txq_free = txq->txq_last - txq->txq_first - 1; 2322 txq->txq_threshold = txq->txq_free / 2; 2323 txq->txq_lastintr = 0; 2324 2325 /* 2326 * What do we want to get interrupted on? 2327 */ 2328 sc->sc_imask |= IEVENT_TXF | IEVENT_TXE; 2329 2330 /* 2331 * Restart the transmit at the first descriptor 2332 */ 2333 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr); 2334 } 2335 2336 static void 2337 pq3etsec_ifstart(struct ifnet *ifp) 2338 { 2339 struct pq3etsec_softc * const sc = ifp->if_softc; 2340 2341 if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) { 2342 return; 2343 } 2344 2345 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2346 softint_schedule(sc->sc_soft_ih); 2347 } 2348 2349 static void 2350 pq3etsec_tx_error( 2351 struct pq3etsec_softc * const sc) 2352 { 2353 struct pq3etsec_txqueue * const txq = &sc->sc_txq; 2354 2355 pq3etsec_txq_consume(sc, txq); 2356 2357 if (pq3etsec_txq_fillable_p(sc, txq)) 2358 sc->sc_if.if_flags &= ~IFF_OACTIVE; 2359 if (sc->sc_txerrors 2360 & (IEVENT_LC | IEVENT_CRL | IEVENT_XFUN | IEVENT_BABT)) { 2361 } else if (sc->sc_txerrors & IEVENT_EBERR) { 2362 } 2363 2364 if (pq3etsec_txq_active_p(sc, txq)) 2365 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask); 2366 if (!pq3etsec_txq_enqueue(sc, txq)) { 2367 sc->sc_ev_tx_stall.ev_count++; 2368 sc->sc_if.if_flags |= IFF_OACTIVE; 2369 } 2370 2371 sc->sc_txerrors = 0; 2372 } 2373 2374 int 2375 pq3etsec_tx_intr(void *arg) 2376 { 2377 struct pq3etsec_softc * const sc = arg; 2378 2379 mutex_enter(sc->sc_hwlock); 2380 2381 sc->sc_ev_tx_intr.ev_count++; 2382 2383 uint32_t ievent = etsec_read(sc, IEVENT); 2384 ievent &= IEVENT_TXF | IEVENT_TXB; 2385 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2386 2387 #if 0 2388 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2389 __func__, ievent, etsec_read(sc, IMASK)); 2390 #endif 2391 2392 if (ievent == 0) { 2393 mutex_exit(sc->sc_hwlock); 2394 return 0; 2395 } 2396 2397 sc->sc_imask &= ~(IEVENT_TXF | IEVENT_TXB); 2398 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2399 etsec_write(sc, IMASK, sc->sc_imask); 2400 softint_schedule(sc->sc_soft_ih); 2401 2402 mutex_exit(sc->sc_hwlock); 2403 2404 return 1; 2405 } 2406 2407 int 2408 pq3etsec_rx_intr(void *arg) 2409 { 2410 struct pq3etsec_softc * const sc = arg; 2411 2412 mutex_enter(sc->sc_hwlock); 2413 2414 sc->sc_ev_rx_intr.ev_count++; 2415 2416 uint32_t ievent = etsec_read(sc, IEVENT); 2417 ievent &= IEVENT_RXF | IEVENT_RXB; 2418 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2419 if (ievent == 0) { 2420 mutex_exit(sc->sc_hwlock); 2421 return 0; 2422 } 2423 2424 #if 0 2425 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent); 2426 #endif 2427 2428 sc->sc_imask &= ~(IEVENT_RXF | IEVENT_RXB); 2429 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR); 2430 etsec_write(sc, IMASK, sc->sc_imask); 2431 softint_schedule(sc->sc_soft_ih); 2432 2433 mutex_exit(sc->sc_hwlock); 2434 2435 return 1; 2436 } 2437 2438 int 2439 pq3etsec_error_intr(void *arg) 2440 { 2441 struct pq3etsec_softc * const sc = arg; 2442 2443 mutex_enter(sc->sc_hwlock); 2444 2445 sc->sc_ev_error_intr.ev_count++; 2446 2447 for (int rv = 0, soft_flags = 0;; rv = 1) { 2448 uint32_t ievent = etsec_read(sc, IEVENT); 2449 ievent &= ~(IEVENT_RXF | IEVENT_RXB | IEVENT_TXF | IEVENT_TXB); 2450 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2451 if (ievent == 0) { 2452 if (soft_flags) { 2453 atomic_or_uint(&sc->sc_soft_flags, soft_flags); 2454 softint_schedule(sc->sc_soft_ih); 2455 } 2456 mutex_exit(sc->sc_hwlock); 2457 return rv; 2458 } 2459 #if 0 2460 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2461 __func__, ievent, etsec_read(sc, IMASK)); 2462 #endif 2463 2464 if (ievent & (IEVENT_GRSC | IEVENT_GTSC)) { 2465 sc->sc_imask &= ~(IEVENT_GRSC | IEVENT_GTSC); 2466 etsec_write(sc, IMASK, sc->sc_imask); 2467 wakeup(sc); 2468 } 2469 if (ievent & (IEVENT_MMRD | IEVENT_MMWR)) { 2470 sc->sc_imask &= ~(IEVENT_MMRD | IEVENT_MMWR); 2471 etsec_write(sc, IMASK, sc->sc_imask); 2472 wakeup(&sc->sc_mii); 2473 } 2474 if (ievent & IEVENT_BSY) { 2475 soft_flags |= SOFT_RXBSY; 2476 sc->sc_imask &= ~IEVENT_BSY; 2477 etsec_write(sc, IMASK, sc->sc_imask); 2478 } 2479 if (ievent & IEVENT_TXE) { 2480 soft_flags |= SOFT_TXERROR; 2481 sc->sc_imask &= ~IEVENT_TXE; 2482 sc->sc_txerrors |= ievent; 2483 } 2484 if (ievent & IEVENT_TXC) { 2485 sc->sc_ev_tx_pause.ev_count++; 2486 } 2487 if (ievent & IEVENT_RXC) { 2488 sc->sc_ev_rx_pause.ev_count++; 2489 } 2490 if (ievent & IEVENT_DPE) { 2491 soft_flags |= SOFT_RESET; 2492 sc->sc_imask &= ~IEVENT_DPE; 2493 etsec_write(sc, IMASK, sc->sc_imask); 2494 } 2495 } 2496 } 2497 2498 void 2499 pq3etsec_soft_intr(void *arg) 2500 { 2501 struct pq3etsec_softc * const sc = arg; 2502 struct ifnet * const ifp = &sc->sc_if; 2503 uint32_t imask = 0; 2504 2505 mutex_enter(sc->sc_lock); 2506 2507 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 2508 2509 sc->sc_ev_soft_intr.ev_count++; 2510 2511 if (soft_flags & SOFT_RESET) { 2512 int s = splnet(); 2513 pq3etsec_ifinit(ifp); 2514 splx(s); 2515 soft_flags = 0; 2516 } 2517 2518 if (soft_flags & SOFT_RXBSY) { 2519 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq; 2520 size_t threshold = 5 * rxq->rxq_threshold / 4; 2521 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2522 threshold = rxq->rxq_last - rxq->rxq_first - 1; 2523 } else { 2524 imask |= IEVENT_BSY; 2525 } 2526 aprint_normal_dev(sc->sc_dev, 2527 "increasing receive buffers from %zu to %zu\n", 2528 rxq->rxq_threshold, threshold); 2529 rxq->rxq_threshold = threshold; 2530 } 2531 2532 if ((soft_flags & SOFT_TXINTR) 2533 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) { 2534 /* 2535 * Let's do what we came here for. Consume transmitted 2536 * packets off the transmit ring. 2537 */ 2538 if (!pq3etsec_txq_consume(sc, &sc->sc_txq) 2539 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) { 2540 sc->sc_ev_tx_stall.ev_count++; 2541 ifp->if_flags |= IFF_OACTIVE; 2542 } else { 2543 ifp->if_flags &= ~IFF_OACTIVE; 2544 } 2545 imask |= IEVENT_TXF; 2546 } 2547 2548 if (soft_flags & (SOFT_RXINTR | SOFT_RXBSY)) { 2549 /* Let's consume */ 2550 pq3etsec_rxq_consume(sc, &sc->sc_rxq); 2551 imask |= IEVENT_RXF; 2552 } 2553 2554 if (soft_flags & SOFT_TXERROR) { 2555 pq3etsec_tx_error(sc); 2556 imask |= IEVENT_TXE; 2557 } 2558 2559 if (ifp->if_flags & IFF_RUNNING) { 2560 pq3etsec_rxq_produce(sc, &sc->sc_rxq); 2561 mutex_spin_enter(sc->sc_hwlock); 2562 sc->sc_imask |= imask; 2563 etsec_write(sc, IMASK, sc->sc_imask); 2564 mutex_spin_exit(sc->sc_hwlock); 2565 } else { 2566 KASSERT((soft_flags & SOFT_RXBSY) == 0); 2567 } 2568 2569 mutex_exit(sc->sc_lock); 2570 } 2571 2572 static void 2573 pq3etsec_mii_tick(void *arg) 2574 { 2575 struct pq3etsec_softc * const sc = arg; 2576 mutex_enter(sc->sc_lock); 2577 callout_ack(&sc->sc_mii_callout); 2578 sc->sc_ev_mii_ticks.ev_count++; 2579 #ifdef DEBUG 2580 uint64_t now = mftb(); 2581 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) { 2582 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n", 2583 __func__, now - sc->sc_mii_last_tick); 2584 callout_stop(&sc->sc_mii_callout); 2585 } 2586 #endif 2587 mii_tick(&sc->sc_mii); 2588 int s = splnet(); 2589 if (sc->sc_soft_flags & SOFT_RESET) 2590 softint_schedule(sc->sc_soft_ih); 2591 splx(s); 2592 callout_schedule(&sc->sc_mii_callout, hz); 2593 #ifdef DEBUG 2594 sc->sc_mii_last_tick = now; 2595 #endif 2596 mutex_exit(sc->sc_lock); 2597 } 2598 2599 static void 2600 pq3etsec_set_ic_rx(struct pq3etsec_softc *sc) 2601 { 2602 uint32_t reg; 2603 2604 if (ETSEC_IC_RX_ENABLED(sc)) { 2605 reg = RXIC_ICEN; 2606 reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count); 2607 reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time); 2608 } else { 2609 /* Disable RX interrupt coalescing */ 2610 reg = 0; 2611 } 2612 2613 etsec_write(sc, RXIC, reg); 2614 } 2615 2616 static void 2617 pq3etsec_set_ic_tx(struct pq3etsec_softc *sc) 2618 { 2619 uint32_t reg; 2620 2621 if (ETSEC_IC_TX_ENABLED(sc)) { 2622 reg = TXIC_ICEN; 2623 reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count); 2624 reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time); 2625 } else { 2626 /* Disable TX interrupt coalescing */ 2627 reg = 0; 2628 } 2629 2630 etsec_write(sc, TXIC, reg); 2631 } 2632 2633 /* 2634 * sysctl 2635 */ 2636 static int 2637 pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep) 2638 { 2639 struct sysctlnode node = *rnode; 2640 struct pq3etsec_softc *sc = rnode->sysctl_data; 2641 int value = *valuep; 2642 int error; 2643 2644 node.sysctl_data = &value; 2645 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2646 if (error != 0 || newp == NULL) 2647 return error; 2648 2649 if (value < 0 || value > 65535) 2650 return EINVAL; 2651 2652 mutex_enter(sc->sc_lock); 2653 *valuep = value; 2654 if (valuep == &sc->sc_ic_rx_time) 2655 pq3etsec_set_ic_rx(sc); 2656 else 2657 pq3etsec_set_ic_tx(sc); 2658 mutex_exit(sc->sc_lock); 2659 2660 return 0; 2661 } 2662 2663 static int 2664 pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep) 2665 { 2666 struct sysctlnode node = *rnode; 2667 struct pq3etsec_softc *sc = rnode->sysctl_data; 2668 int value = *valuep; 2669 int error; 2670 2671 node.sysctl_data = &value; 2672 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2673 if (error != 0 || newp == NULL) 2674 return error; 2675 2676 if (value < 0 || value > 255) 2677 return EINVAL; 2678 2679 mutex_enter(sc->sc_lock); 2680 *valuep = value; 2681 if (valuep == &sc->sc_ic_rx_count) 2682 pq3etsec_set_ic_rx(sc); 2683 else 2684 pq3etsec_set_ic_tx(sc); 2685 mutex_exit(sc->sc_lock); 2686 2687 return 0; 2688 } 2689 2690 static int 2691 pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS) 2692 { 2693 struct pq3etsec_softc *sc = rnode->sysctl_data; 2694 2695 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2696 &sc->sc_ic_rx_time); 2697 } 2698 2699 static int 2700 pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS) 2701 { 2702 struct pq3etsec_softc *sc = rnode->sysctl_data; 2703 2704 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2705 &sc->sc_ic_rx_count); 2706 } 2707 2708 static int 2709 pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS) 2710 { 2711 struct pq3etsec_softc *sc = rnode->sysctl_data; 2712 2713 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2714 &sc->sc_ic_tx_time); 2715 } 2716 2717 static int 2718 pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS) 2719 { 2720 struct pq3etsec_softc *sc = rnode->sysctl_data; 2721 2722 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2723 &sc->sc_ic_tx_count); 2724 } 2725 2726 static void pq3etsec_sysctl_setup(struct sysctllog **clog, 2727 struct pq3etsec_softc *sc) 2728 { 2729 const struct sysctlnode *cnode, *rnode; 2730 2731 if (sysctl_createv(clog, 0, NULL, &rnode, 2732 CTLFLAG_PERMANENT, 2733 CTLTYPE_NODE, device_xname(sc->sc_dev), 2734 SYSCTL_DESCR("TSEC interface"), 2735 NULL, 0, NULL, 0, 2736 CTL_HW, CTL_CREATE, CTL_EOL) != 0) 2737 goto bad; 2738 2739 if (sysctl_createv(clog, 0, &rnode, &rnode, 2740 CTLFLAG_PERMANENT, 2741 CTLTYPE_NODE, "int_coal", 2742 SYSCTL_DESCR("Interrupts coalescing"), 2743 NULL, 0, NULL, 0, 2744 CTL_CREATE, CTL_EOL) != 0) 2745 goto bad; 2746 2747 if (sysctl_createv(clog, 0, &rnode, &cnode, 2748 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 2749 CTLTYPE_INT, "rx_time", 2750 SYSCTL_DESCR("RX time threshold (0-65535)"), 2751 pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0, 2752 CTL_CREATE, CTL_EOL) != 0) 2753 goto bad; 2754 2755 if (sysctl_createv(clog, 0, &rnode, &cnode, 2756 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 2757 CTLTYPE_INT, "rx_count", 2758 SYSCTL_DESCR("RX frame count threshold (0-255)"), 2759 pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0, 2760 CTL_CREATE, CTL_EOL) != 0) 2761 goto bad; 2762 2763 if (sysctl_createv(clog, 0, &rnode, &cnode, 2764 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 2765 CTLTYPE_INT, "tx_time", 2766 SYSCTL_DESCR("TX time threshold (0-65535)"), 2767 pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0, 2768 CTL_CREATE, CTL_EOL) != 0) 2769 goto bad; 2770 2771 if (sysctl_createv(clog, 0, &rnode, &cnode, 2772 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 2773 CTLTYPE_INT, "tx_count", 2774 SYSCTL_DESCR("TX frame count threshold (0-255)"), 2775 pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0, 2776 CTL_CREATE, CTL_EOL) != 0) 2777 goto bad; 2778 2779 return; 2780 2781 bad: 2782 aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n"); 2783 } 2784