1 /* $NetBSD: pq3etsec.c,v 1.49 2020/01/30 06:27:13 martin Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "opt_inet.h" 38 #include "opt_mpc85xx.h" 39 #include "opt_multiprocessor.h" 40 #include "opt_net_mpsafe.h" 41 42 #include <sys/cdefs.h> 43 44 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.49 2020/01/30 06:27:13 martin Exp $"); 45 46 #include <sys/param.h> 47 #include <sys/cpu.h> 48 #include <sys/device.h> 49 #include <sys/mbuf.h> 50 #include <sys/ioctl.h> 51 #include <sys/intr.h> 52 #include <sys/bus.h> 53 #include <sys/kernel.h> 54 #include <sys/kmem.h> 55 #include <sys/proc.h> 56 #include <sys/atomic.h> 57 #include <sys/callout.h> 58 #include <sys/sysctl.h> 59 60 #include <net/if.h> 61 #include <net/if_dl.h> 62 #include <net/if_ether.h> 63 #include <net/if_media.h> 64 #include <net/bpf.h> 65 66 #include <dev/mii/miivar.h> 67 68 #ifdef INET 69 #include <netinet/in.h> 70 #include <netinet/in_systm.h> 71 #include <netinet/ip.h> 72 #include <netinet/in_offload.h> 73 #endif /* INET */ 74 #ifdef INET6 75 #include <netinet6/in6.h> 76 #include <netinet/ip6.h> 77 #endif 78 #include <netinet6/in6_offload.h> 79 80 #include <powerpc/spr.h> 81 #include <powerpc/booke/spr.h> 82 #include <powerpc/booke/cpuvar.h> 83 #include <powerpc/booke/e500var.h> 84 #include <powerpc/booke/e500reg.h> 85 #include <powerpc/booke/etsecreg.h> 86 87 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 88 89 #define ETSEC_MAXTXMBUFS 30 90 #define ETSEC_NTXSEGS 30 91 #define ETSEC_MAXRXMBUFS 511 92 #define ETSEC_MINRXMBUFS 32 93 #define ETSEC_NRXSEGS 1 94 95 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 96 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx \ 97 | IFCAP_CSUM_UDPv4_Rx \ 98 | IFCAP_CSUM_TCPv6_Rx \ 99 | IFCAP_CSUM_UDPv6_Rx) 100 101 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 102 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx \ 103 | IFCAP_CSUM_UDPv4_Tx \ 104 | IFCAP_CSUM_TCPv6_Tx \ 105 | IFCAP_CSUM_UDPv6_Tx) 106 107 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN | IFCAP_RCTRL_TUCSEN \ 108 | IFCAP_TCTRL_IPCSEN | IFCAP_TCTRL_TUCSEN) 109 110 #define M_CSUM_IP (M_CSUM_CIP | M_CSUM_CTU) 111 #define M_CSUM_IP6 (M_CSUM_TCPv6 | M_CSUM_UDPv6) 112 #define M_CSUM_TUP (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6) 113 #define M_CSUM_UDP (M_CSUM_UDPv4 | M_CSUM_UDPv6) 114 #define M_CSUM_IP4 (M_CSUM_IPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4) 115 #define M_CSUM_CIP (M_CSUM_IPv4) 116 #define M_CSUM_CTU (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6) 117 118 struct pq3etsec_txqueue { 119 bus_dmamap_t txq_descmap; 120 volatile struct txbd *txq_consumer; 121 volatile struct txbd *txq_producer; 122 volatile struct txbd *txq_first; 123 volatile struct txbd *txq_last; 124 struct ifqueue txq_mbufs; 125 struct mbuf *txq_next; 126 #ifdef ETSEC_DEBUG 127 struct mbuf *txq_lmbufs[512]; 128 #endif 129 uint32_t txq_qmask; 130 uint32_t txq_free; 131 uint32_t txq_threshold; 132 uint32_t txq_lastintr; 133 bus_size_t txq_reg_tbase; 134 bus_dma_segment_t txq_descmap_seg; 135 }; 136 137 struct pq3etsec_rxqueue { 138 bus_dmamap_t rxq_descmap; 139 volatile struct rxbd *rxq_consumer; 140 volatile struct rxbd *rxq_producer; 141 volatile struct rxbd *rxq_first; 142 volatile struct rxbd *rxq_last; 143 struct mbuf *rxq_mhead; 144 struct mbuf **rxq_mtail; 145 struct mbuf *rxq_mconsumer; 146 #ifdef ETSEC_DEBUG 147 struct mbuf *rxq_mbufs[512]; 148 #endif 149 uint32_t rxq_qmask; 150 uint32_t rxq_inuse; 151 uint32_t rxq_threshold; 152 bus_size_t rxq_reg_rbase; 153 bus_size_t rxq_reg_rbptr; 154 bus_dma_segment_t rxq_descmap_seg; 155 }; 156 157 struct pq3etsec_mapcache { 158 u_int dmc_nmaps; 159 u_int dmc_maxseg; 160 u_int 
dmc_maxmaps; 161 u_int dmc_maxmapsize; 162 bus_dmamap_t dmc_maps[0]; 163 }; 164 165 struct pq3etsec_softc { 166 device_t sc_dev; 167 device_t sc_mdio_dev; 168 struct ethercom sc_ec; 169 #define sc_if sc_ec.ec_if 170 struct mii_data sc_mii; 171 bus_space_tag_t sc_bst; 172 bus_space_handle_t sc_bsh; 173 bus_space_handle_t sc_mdio_bsh; 174 bus_dma_tag_t sc_dmat; 175 int sc_phy_addr; 176 prop_dictionary_t sc_intrmap; 177 uint32_t sc_intrmask; 178 179 uint32_t sc_soft_flags; 180 #define SOFT_RESET 0x0001 181 #define SOFT_RXINTR 0x0010 182 #define SOFT_RXBSY 0x0020 183 #define SOFT_TXINTR 0x0100 184 #define SOFT_TXERROR 0x0200 185 186 struct pq3etsec_txqueue sc_txq; 187 struct pq3etsec_rxqueue sc_rxq; 188 uint32_t sc_txerrors; 189 uint32_t sc_rxerrors; 190 191 size_t sc_rx_adjlen; 192 193 /* 194 * Copies of various ETSEC registers. 195 */ 196 uint32_t sc_imask; 197 uint32_t sc_maccfg1; 198 uint32_t sc_maccfg2; 199 uint32_t sc_maxfrm; 200 uint32_t sc_ecntrl; 201 uint32_t sc_dmactrl; 202 uint32_t sc_macstnaddr1; 203 uint32_t sc_macstnaddr2; 204 uint32_t sc_tctrl; 205 uint32_t sc_rctrl; 206 uint32_t sc_gaddr[16]; 207 uint64_t sc_macaddrs[15]; 208 209 void *sc_tx_ih; 210 void *sc_rx_ih; 211 void *sc_error_ih; 212 void *sc_soft_ih; 213 214 kmutex_t *sc_lock; 215 kmutex_t *sc_hwlock; 216 217 struct evcnt sc_ev_tx_stall; 218 struct evcnt sc_ev_tx_intr; 219 struct evcnt sc_ev_rx_stall; 220 struct evcnt sc_ev_rx_intr; 221 struct evcnt sc_ev_error_intr; 222 struct evcnt sc_ev_soft_intr; 223 struct evcnt sc_ev_tx_pause; 224 struct evcnt sc_ev_rx_pause; 225 struct evcnt sc_ev_mii_ticks; 226 227 struct callout sc_mii_callout; 228 uint64_t sc_mii_last_tick; 229 230 struct ifqueue sc_rx_bufcache; 231 struct pq3etsec_mapcache *sc_rx_mapcache; 232 struct pq3etsec_mapcache *sc_tx_mapcache; 233 234 /* Interrupt Coalescing parameters */ 235 int sc_ic_rx_time; 236 int sc_ic_rx_count; 237 int sc_ic_tx_time; 238 int sc_ic_tx_count; 239 }; 240 241 #define ETSEC_IC_RX_ENABLED(sc) \ 242 ((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0) 243 #define ETSEC_IC_TX_ENABLED(sc) \ 244 ((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0) 245 246 struct pq3mdio_softc { 247 device_t mdio_dev; 248 249 kmutex_t *mdio_lock; 250 251 bus_space_tag_t mdio_bst; 252 bus_space_handle_t mdio_bsh; 253 }; 254 255 static int pq3etsec_match(device_t, cfdata_t, void *); 256 static void pq3etsec_attach(device_t, device_t, void *); 257 258 static int pq3mdio_match(device_t, cfdata_t, void *); 259 static void pq3mdio_attach(device_t, device_t, void *); 260 261 static void pq3etsec_ifstart(struct ifnet *); 262 static void pq3etsec_ifwatchdog(struct ifnet *); 263 static int pq3etsec_ifinit(struct ifnet *); 264 static void pq3etsec_ifstop(struct ifnet *, int); 265 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 266 267 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 268 struct pq3etsec_mapcache **, size_t, size_t, size_t); 269 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 270 struct pq3etsec_mapcache *); 271 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 272 struct pq3etsec_mapcache *); 273 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 274 struct pq3etsec_mapcache *, bus_dmamap_t); 275 276 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 277 struct pq3etsec_txqueue *, u_int); 278 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 279 struct pq3etsec_txqueue *); 280 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 281 struct pq3etsec_txqueue *); 282 
static bool pq3etsec_txq_consume(struct pq3etsec_softc *, 283 struct pq3etsec_txqueue *); 284 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 285 struct pq3etsec_txqueue *, struct mbuf *m); 286 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 287 struct pq3etsec_txqueue *); 288 289 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 290 struct pq3etsec_rxqueue *, u_int); 291 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 292 struct pq3etsec_rxqueue *); 293 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 294 struct pq3etsec_rxqueue *, bool); 295 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 296 struct pq3etsec_rxqueue *); 297 298 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 299 300 static void pq3etsec_mii_tick(void *); 301 static int pq3etsec_rx_intr(void *); 302 static int pq3etsec_tx_intr(void *); 303 static int pq3etsec_error_intr(void *); 304 static void pq3etsec_soft_intr(void *); 305 306 static void pq3etsec_set_ic_rx(struct pq3etsec_softc *); 307 static void pq3etsec_set_ic_tx(struct pq3etsec_softc *); 308 309 static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *); 310 311 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 312 pq3etsec_match, pq3etsec_attach, NULL, NULL); 313 314 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc), 315 pq3mdio_match, pq3mdio_attach, NULL, NULL); 316 317 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc), 318 pq3mdio_match, pq3mdio_attach, NULL, NULL); 319 320 static inline uint32_t 321 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off) 322 { 323 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off); 324 } 325 326 static inline void 327 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data) 328 { 329 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data); 330 } 331 332 static inline uint32_t 333 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 334 { 335 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 336 } 337 338 static int 339 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 340 { 341 return strcmp(cf->cf_name, "mdio") == 0; 342 } 343 344 static int 345 pq3mdio_match(device_t parent, cfdata_t cf, void *aux) 346 { 347 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 348 const bool p1025_p = (svr == (SVR_P1025v1 >> 16) 349 || svr == (SVR_P1016v1 >> 16)); 350 351 if (device_is_a(parent, "cpunode")) { 352 if (!p1025_p 353 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 354 return 0; 355 356 return 1; 357 } 358 359 if (device_is_a(parent, "tsec")) { 360 if (p1025_p 361 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 362 return 0; 363 364 return 1; 365 } 366 367 return 0; 368 } 369 370 static void 371 pq3mdio_attach(device_t parent, device_t self, void *aux) 372 { 373 struct pq3mdio_softc * const mdio = device_private(self); 374 struct cpunode_attach_args * const cna = aux; 375 struct cpunode_locators * const cnl = &cna->cna_locs; 376 377 mdio->mdio_dev = self; 378 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 379 380 if (device_is_a(parent, "cpunode")) { 381 struct cpunode_softc * const psc = device_private(parent); 382 psc->sc_children |= cna->cna_childmask; 383 384 mdio->mdio_bst = cna->cna_memt; 385 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr, 386 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) { 387 aprint_error(": error mapping registers @ %#x\n", 388 cnl->cnl_addr); 389 return; 390 } 391 } else { 392 struct 
pq3etsec_softc * const sc = device_private(parent); 393 394 KASSERT(device_is_a(parent, "tsec")); 395 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE 396 || cnl->cnl_addr == ETSEC2_BASE 397 || cnl->cnl_addr == ETSEC3_BASE 398 || cnl->cnl_addr == ETSEC4_BASE, 399 "unknown tsec addr %x", cnl->cnl_addr); 400 401 mdio->mdio_bst = sc->sc_bst; 402 mdio->mdio_bsh = sc->sc_bsh; 403 } 404 405 aprint_normal("\n"); 406 } 407 408 static int 409 pq3mdio_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 410 { 411 struct pq3mdio_softc * const mdio = device_private(self); 412 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 413 414 mutex_enter(mdio->mdio_lock); 415 416 etsec_mdio_write(mdio, MIIMADD, 417 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 418 419 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 420 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ); 421 422 while (etsec_mdio_read(mdio, MIIMIND) != 0) { 423 delay(1); 424 } 425 *val = etsec_mdio_read(mdio, MIIMSTAT) &0xffff; 426 427 if (miimcom == MIIMCOM_SCAN) 428 etsec_mdio_write(mdio, MIIMCOM, miimcom); 429 430 #if 0 431 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 432 __func__, phy, reg, data); 433 #endif 434 mutex_exit(mdio->mdio_lock); 435 return 0; 436 } 437 438 static int 439 pq3mdio_mii_writereg(device_t self, int phy, int reg, uint16_t data) 440 { 441 struct pq3mdio_softc * const mdio = device_private(self); 442 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 443 444 #if 0 445 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 446 __func__, phy, reg, data); 447 #endif 448 449 mutex_enter(mdio->mdio_lock); 450 451 etsec_mdio_write(mdio, MIIMADD, 452 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 453 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 454 etsec_mdio_write(mdio, MIIMCON, data); 455 456 int timo = 1000; /* 1ms */ 457 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 458 delay(1); 459 } 460 461 if (miimcom == MIIMCOM_SCAN) 462 etsec_mdio_write(mdio, MIIMCOM, miimcom); 463 464 mutex_exit(mdio->mdio_lock); 465 466 return 0; 467 } 468 469 static inline void 470 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 471 { 472 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 473 } 474 475 static void 476 pq3etsec_mii_statchg(struct ifnet *ifp) 477 { 478 struct pq3etsec_softc * const sc = ifp->if_softc; 479 struct mii_data * const mii = &sc->sc_mii; 480 481 uint32_t maccfg1 = sc->sc_maccfg1; 482 uint32_t maccfg2 = sc->sc_maccfg2; 483 uint32_t ecntrl = sc->sc_ecntrl; 484 485 maccfg1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 486 maccfg2 &= ~(MACCFG2_IFMODE | MACCFG2_FD); 487 488 if (sc->sc_mii.mii_media_active & IFM_FDX) { 489 maccfg2 |= MACCFG2_FD; 490 } 491 492 /* 493 * Now deal with the flow control bits. 494 */ 495 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 496 && (mii->mii_media_active & IFM_ETH_FMASK)) { 497 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 498 maccfg1 |= MACCFG1_RX_FLOW; 499 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 500 maccfg1 |= MACCFG1_TX_FLOW; 501 } 502 503 /* 504 * Now deal with the speed. 505 */ 506 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 507 maccfg2 |= MACCFG2_IFMODE_GMII; 508 } else { 509 maccfg2 |= MACCFG2_IFMODE_MII; 510 ecntrl &= ~ECNTRL_R100M; 511 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 512 ecntrl |= ECNTRL_R100M; 513 } 514 } 515 516 /* 517 * If things are different, re-init things. 
518 */ 519 if (maccfg1 != sc->sc_maccfg1 520 || maccfg2 != sc->sc_maccfg2 521 || ecntrl != sc->sc_ecntrl) { 522 if (sc->sc_if.if_flags & IFF_RUNNING) 523 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 524 sc->sc_maccfg1 = maccfg1; 525 sc->sc_maccfg2 = maccfg2; 526 sc->sc_ecntrl = ecntrl; 527 } 528 } 529 530 #if 0 531 static void 532 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 533 { 534 struct pq3etsec_softc * const sc = ifp->if_softc; 535 536 mii_pollstat(&sc->sc_mii); 537 ether_mediastatus(ifp, ifmr); 538 ifmr->ifm_status = sc->sc_mii.mii_media_status; 539 ifmr->ifm_active = sc->sc_mii.mii_media_active; 540 } 541 542 static int 543 pq3etsec_mediachange(struct ifnet *ifp) 544 { 545 struct pq3etsec_softc * const sc = ifp->if_softc; 546 547 if ((ifp->if_flags & IFF_UP) == 0) 548 return 0; 549 550 int rv = mii_mediachg(&sc->sc_mii); 551 return (rv == ENXIO) ? 0 : rv; 552 } 553 #endif 554 555 static int 556 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 557 { 558 559 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 560 return 0; 561 562 return 1; 563 } 564 565 static void 566 pq3etsec_attach(device_t parent, device_t self, void *aux) 567 { 568 struct cpunode_softc * const psc = device_private(parent); 569 struct pq3etsec_softc * const sc = device_private(self); 570 struct mii_data * const mii = &sc->sc_mii; 571 struct cpunode_attach_args * const cna = aux; 572 struct cpunode_locators * const cnl = &cna->cna_locs; 573 cfdata_t cf = device_cfdata(self); 574 int error; 575 576 psc->sc_children |= cna->cna_childmask; 577 sc->sc_dev = self; 578 sc->sc_bst = cna->cna_memt; 579 sc->sc_dmat = &booke_bus_dma_tag; 580 581 /* 582 * Pull out the mdio bus and phy we are supposed to use. 583 */ 584 const int mdio = cf->cf_loc[CPUNODECF_MDIO]; 585 const int phy = cf->cf_loc[CPUNODECF_PHY]; 586 if (mdio != CPUNODECF_MDIO_DEFAULT) 587 aprint_normal(" mdio %d", mdio); 588 589 /* 590 * See if the phy is in the config file... 591 */ 592 if (phy != CPUNODECF_PHY_DEFAULT) { 593 sc->sc_phy_addr = phy; 594 } else { 595 unsigned char prop_name[20]; 596 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 597 cnl->cnl_instance); 598 sc->sc_phy_addr = board_info_get_number(prop_name); 599 } 600 if (sc->sc_phy_addr != MII_PHY_ANY) 601 aprint_normal(" phy %d", sc->sc_phy_addr); 602 603 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 604 &sc->sc_bsh); 605 if (error) { 606 aprint_error(": error mapping registers: %d\n", error); 607 return; 608 } 609 610 /* 611 * Assume firmware has aready set the mac address and fetch it 612 * before we reinit it. 
613 */ 614 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 615 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 616 sc->sc_rctrl = RCTRL_DEFAULT; 617 sc->sc_ecntrl = etsec_read(sc, ECNTRL); 618 sc->sc_maccfg1 = etsec_read(sc, MACCFG1); 619 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT; 620 621 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 622 size_t len; 623 const uint8_t *mac_addr = 624 board_info_get_data("tsec-mac-addr-base", &len); 625 KASSERT(len == ETHER_ADDR_LEN); 626 sc->sc_macstnaddr2 = 627 (mac_addr[1] << 24) 628 | (mac_addr[0] << 16); 629 sc->sc_macstnaddr1 = 630 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 631 | (mac_addr[4] << 16) 632 | (mac_addr[3] << 8) 633 | (mac_addr[2] << 0); 634 #if 0 635 aprint_error(": mac-address unknown\n"); 636 return; 637 #endif 638 } 639 640 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 641 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 642 643 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 644 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 645 646 /* Disable interrupts */ 647 etsec_write(sc, IMASK, 0); 648 649 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 650 if (error) { 651 aprint_error(": failed to init rxq: %d\n", error); 652 goto fail_1; 653 } 654 655 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 656 if (error) { 657 aprint_error(": failed to init txq: %d\n", error); 658 goto fail_2; 659 } 660 661 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 662 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 663 if (error) { 664 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 665 goto fail_3; 666 } 667 668 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 669 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 670 if (error) { 671 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 672 goto fail_4; 673 } 674 675 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 676 pq3etsec_tx_intr, sc); 677 if (sc->sc_tx_ih == NULL) { 678 aprint_error(": failed to establish tx interrupt: %d\n", 679 cnl->cnl_intrs[0]); 680 goto fail_5; 681 } 682 683 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 684 pq3etsec_rx_intr, sc); 685 if (sc->sc_rx_ih == NULL) { 686 aprint_error(": failed to establish rx interrupt: %d\n", 687 cnl->cnl_intrs[1]); 688 goto fail_6; 689 } 690 691 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 692 pq3etsec_error_intr, sc); 693 if (sc->sc_error_ih == NULL) { 694 aprint_error(": failed to establish error interrupt: %d\n", 695 cnl->cnl_intrs[2]); 696 goto fail_7; 697 } 698 699 int softint_flags = SOFTINT_NET; 700 #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE) 701 softint_flags |= SOFTINT_MPSAFE; 702 #endif /* !MULTIPROCESSOR || NET_MPSAFE */ 703 sc->sc_soft_ih = softint_establish(softint_flags, 704 pq3etsec_soft_intr, sc); 705 if (sc->sc_soft_ih == NULL) { 706 aprint_error(": failed to establish soft interrupt\n"); 707 goto fail_8; 708 } 709 710 /* 711 * If there was no MDIO 712 */ 713 if (mdio == CPUNODECF_MDIO_DEFAULT) { 714 aprint_normal("\n"); 715 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna); 716 if (mdio_cf != NULL) { 717 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL); 718 } 719 } else { 720 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio); 721 if (sc->sc_mdio_dev == NULL) { 722 aprint_error(": failed to locate mdio device\n"); 723 goto fail_9; 724 } 725 aprint_normal("\n"); 726 } 727 728 etsec_write(sc, ATTR, ATTR_DEFAULT); 729 
etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 730 731 /* Enable interrupt coalesing */ 732 sc->sc_ic_rx_time = 768; 733 sc->sc_ic_rx_count = 16; 734 sc->sc_ic_tx_time = 768; 735 sc->sc_ic_tx_count = 16; 736 pq3etsec_set_ic_rx(sc); 737 pq3etsec_set_ic_tx(sc); 738 739 char enaddr[ETHER_ADDR_LEN] = { 740 [0] = sc->sc_macstnaddr2 >> 16, 741 [1] = sc->sc_macstnaddr2 >> 24, 742 [2] = sc->sc_macstnaddr1 >> 0, 743 [3] = sc->sc_macstnaddr1 >> 8, 744 [4] = sc->sc_macstnaddr1 >> 16, 745 [5] = sc->sc_macstnaddr1 >> 24, 746 }; 747 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 748 ether_sprintf(enaddr)); 749 750 const char * const xname = device_xname(sc->sc_dev); 751 struct ethercom * const ec = &sc->sc_ec; 752 struct ifnet * const ifp = &ec->ec_if; 753 754 ec->ec_mii = mii; 755 756 mii->mii_ifp = ifp; 757 mii->mii_readreg = pq3mdio_mii_readreg; 758 mii->mii_writereg = pq3mdio_mii_writereg; 759 mii->mii_statchg = pq3etsec_mii_statchg; 760 761 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 762 763 if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) { 764 mii_attach(sc->sc_mdio_dev, mii, 0xffffffff, 765 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 766 767 if (LIST_FIRST(&mii->mii_phys) == NULL) { 768 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 769 0, NULL); 770 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 771 } else { 772 callout_schedule(&sc->sc_mii_callout, hz); 773 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 774 } 775 } else { 776 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 777 0, NULL); 778 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX); 779 } 780 781 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 782 | ETHERCAP_JUMBO_MTU; 783 ec->ec_capenable = ETHERCAP_VLAN_HWTAGGING; 784 785 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 786 ifp->if_softc = sc; 787 ifp->if_capabilities = IFCAP_ETSEC; 788 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 789 ifp->if_ioctl = pq3etsec_ifioctl; 790 ifp->if_start = pq3etsec_ifstart; 791 ifp->if_watchdog = pq3etsec_ifwatchdog; 792 ifp->if_init = pq3etsec_ifinit; 793 ifp->if_stop = pq3etsec_ifstop; 794 IFQ_SET_READY(&ifp->if_snd); 795 796 /* 797 * Attach the interface. 
798 */ 799 error = if_initialize(ifp); 800 if (error != 0) { 801 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n", 802 error); 803 goto fail_10; 804 } 805 pq3etsec_sysctl_setup(NULL, sc); 806 ether_ifattach(ifp, enaddr); 807 if_register(ifp); 808 809 pq3etsec_ifstop(ifp, true); 810 811 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC, 812 NULL, xname, "rx stall"); 813 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 814 NULL, xname, "tx stall"); 815 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR, 816 NULL, xname, "tx intr"); 817 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR, 818 NULL, xname, "rx intr"); 819 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR, 820 NULL, xname, "error intr"); 821 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 822 NULL, xname, "soft intr"); 823 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC, 824 NULL, xname, "tx pause"); 825 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC, 826 NULL, xname, "rx pause"); 827 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC, 828 NULL, xname, "mii ticks"); 829 return; 830 831 fail_10: 832 ifmedia_removeall(&mii->mii_media); 833 mii_detach(mii, sc->sc_phy_addr, MII_OFFSET_ANY); 834 fail_9: 835 softint_disestablish(sc->sc_soft_ih); 836 fail_8: 837 intr_disestablish(sc->sc_error_ih); 838 fail_7: 839 intr_disestablish(sc->sc_rx_ih); 840 fail_6: 841 intr_disestablish(sc->sc_tx_ih); 842 fail_5: 843 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 844 fail_4: 845 pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache); 846 fail_3: 847 #if 0 /* notyet */ 848 pq3etsec_txq_detach(sc); 849 #endif 850 fail_2: 851 #if 0 /* notyet */ 852 pq3etsec_rxq_detach(sc); 853 #endif 854 fail_1: 855 callout_destroy(&sc->sc_mii_callout); 856 mutex_obj_free(sc->sc_lock); 857 mutex_obj_free(sc->sc_hwlock); 858 bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size); 859 } 860 861 static uint64_t 862 pq3etsec_macaddr_create(const uint8_t *lladdr) 863 { 864 uint64_t macaddr = 0; 865 866 lladdr += ETHER_ADDR_LEN; 867 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) { 868 macaddr = (macaddr << 8) | *--lladdr; 869 } 870 return macaddr << 16; 871 } 872 873 static int 874 pq3etsec_ifinit(struct ifnet *ifp) 875 { 876 struct pq3etsec_softc * const sc = ifp->if_softc; 877 int error = 0; 878 879 sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES); 880 if (ifp->if_mtu > ETHERMTU_JUMBO) 881 return error; 882 883 KASSERT(ifp->if_flags & IFF_UP); 884 885 /* 886 * Stop the interface (steps 1 to 4 in the Soft Reset and 887 * Reconfigurating Procedure. 888 */ 889 pq3etsec_ifstop(ifp, 0); 890 891 /* 892 * If our frame size has changed (or it's our first time through) 893 * destroy the existing transmit mapcache. 
894 */ 895 if (sc->sc_tx_mapcache != NULL 896 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 897 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 898 sc->sc_tx_mapcache = NULL; 899 } 900 901 if (sc->sc_tx_mapcache == NULL) { 902 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 903 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 904 if (error) 905 return error; 906 } 907 908 sc->sc_ev_mii_ticks.ev_count++; 909 mii_tick(&sc->sc_mii); 910 911 if (ifp->if_flags & IFF_PROMISC) { 912 sc->sc_rctrl |= RCTRL_PROM; 913 } else { 914 sc->sc_rctrl &= ~RCTRL_PROM; 915 } 916 917 uint32_t rctrl_prsdep = 0; 918 sc->sc_rctrl &= 919 ~(RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP); 920 if (VLAN_ATTACHED(&sc->sc_ec)) { 921 sc->sc_rctrl |= RCTRL_VLEX; 922 rctrl_prsdep = RCTRL_PRSDEP_L2; 923 } 924 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 925 sc->sc_rctrl |= RCTRL_IPCSEN; 926 rctrl_prsdep = RCTRL_PRSDEP_L3; 927 } 928 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 929 sc->sc_rctrl |= RCTRL_TUCSEN; 930 rctrl_prsdep = RCTRL_PRSDEP_L4; 931 } 932 sc->sc_rctrl |= rctrl_prsdep; 933 #if 0 934 if (sc->sc_rctrl 935 & (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP)) 936 aprint_normal_dev(sc->sc_dev, 937 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 938 sc->sc_rctrl, 939 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 940 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 941 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 942 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 943 #endif 944 945 sc->sc_tctrl &= ~(TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS); 946 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 947 sc->sc_tctrl |= TCTRL_VLINS; 948 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 949 sc->sc_tctrl |= TCTRL_IPCSEN; 950 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 951 sc->sc_tctrl |= TCTRL_TUCSEN; 952 #if 0 953 if (sc->sc_tctrl & (TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS)) 954 aprint_normal_dev(sc->sc_dev, 955 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 956 sc->sc_tctrl, 957 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 958 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 959 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 960 #endif 961 962 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN | MACCFG1_RX_EN); 963 964 const uint64_t macstnaddr = 965 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 966 967 sc->sc_imask = IEVENT_DPE; 968 969 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 970 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 971 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 972 973 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 974 pq3etsec_txq_reset(sc, &sc->sc_txq); 975 976 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 977 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 978 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 979 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 980 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 981 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 982 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 983 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 984 985 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 986 pq3etsec_mc_setup(sc); 987 988 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 989 etsec_write(sc, MRBLR, MCLBYTES); 990 991 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 992 sc->sc_dmactrl |= DMACTRL_DEFAULT; 993 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 994 995 /* 11. 
Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */ 996 etsec_write(sc, TQUEUE, TQUEUE_EN0); 997 sc->sc_imask |= IEVENT_TXF | IEVENT_TXE | IEVENT_TXC; 998 999 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 1000 1001 /* 12. Enable receive queues in RQUEUE, */ 1002 etsec_write(sc, RQUEUE, RQUEUE_EN0 | RQUEUE_EX0); 1003 sc->sc_imask |= IEVENT_RXF | IEVENT_BSY | IEVENT_RXC; 1004 1005 /* and optionally set TOE functionality in RCTRL. */ 1006 etsec_write(sc, RCTRL, sc->sc_rctrl); 1007 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 1008 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 1009 sc->sc_rx_adjlen += sizeof(struct rxfcb); 1010 1011 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 1012 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 1013 1014 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 1015 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 1016 1017 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 1018 sc->sc_dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS); 1019 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1020 1021 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 1022 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 1023 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 1024 1025 sc->sc_soft_flags = 0; 1026 1027 etsec_write(sc, IMASK, sc->sc_imask); 1028 1029 ifp->if_flags |= IFF_RUNNING; 1030 1031 return error; 1032 } 1033 1034 static void 1035 pq3etsec_ifstop(struct ifnet *ifp, int disable) 1036 { 1037 struct pq3etsec_softc * const sc = ifp->if_softc; 1038 1039 KASSERT(!cpu_intr_p()); 1040 const uint32_t imask_gsc_mask = IEVENT_GTSC | IEVENT_GRSC; 1041 /* 1042 * Clear the GTSC and GRSC from the interrupt mask until 1043 * we are ready for them. Then clear them from IEVENT, 1044 * request the graceful shutdown, and then enable the 1045 * GTSC and GRSC bits in the mask. This should cause the 1046 * error interrupt to fire which will issue a wakeup to 1047 * allow us to resume. 1048 */ 1049 1050 /* 1051 * 1. Set GRS/GTS bits in DMACTRL register 1052 */ 1053 sc->sc_dmactrl |= DMACTRL_GRS | DMACTRL_GTS; 1054 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 1055 etsec_write(sc, IEVENT, imask_gsc_mask); 1056 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1057 1058 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN | MACCFG1_RX_EN)) { 1059 /* 1060 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 1061 */ 1062 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 1063 1064 u_int timo = 1000; 1065 uint32_t ievent = etsec_read(sc, IEVENT); 1066 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 1067 if (--timo == 0) { 1068 aprint_error_dev(sc->sc_dev, 1069 "WARNING: " 1070 "request to stop failed (IEVENT=%#x)\n", 1071 ievent); 1072 break; 1073 } 1074 delay(10); 1075 ievent = etsec_read(sc, IEVENT); 1076 } 1077 } 1078 1079 /* 1080 * Now reset the controller. 1081 * 1082 * 3. Set SOFT_RESET bit in MACCFG1 register 1083 * 4. 
Clear SOFT_RESET bit in MACCFG1 register 1084 */ 1085 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 1086 etsec_write(sc, MACCFG1, 0); 1087 etsec_write(sc, IMASK, 0); 1088 etsec_write(sc, IEVENT, ~0); 1089 sc->sc_imask = 0; 1090 ifp->if_flags &= ~IFF_RUNNING; 1091 1092 uint32_t tbipa = etsec_read(sc, TBIPA); 1093 if (tbipa == sc->sc_phy_addr) { 1094 aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 1095 etsec_write(sc, TBIPA, 0x1f); 1096 } 1097 uint32_t miimcfg = etsec_read(sc, MIIMCFG); 1098 etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 1099 etsec_write(sc, MIIMCFG, miimcfg); 1100 1101 /* 1102 * Let's consume any remaing transmitted packets. And if we are 1103 * disabling the interface, purge ourselves of any untransmitted 1104 * packets. But don't consume any received packets, just drop them. 1105 * If we aren't disabling the interface, save the mbufs in the 1106 * receive queue for reuse. 1107 */ 1108 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1109 pq3etsec_txq_consume(sc, &sc->sc_txq); 1110 if (disable) { 1111 pq3etsec_txq_purge(sc, &sc->sc_txq); 1112 IFQ_PURGE(&ifp->if_snd); 1113 } 1114 } 1115 1116 static void 1117 pq3etsec_ifwatchdog(struct ifnet *ifp) 1118 { 1119 } 1120 1121 static void 1122 pq3etsec_mc_setup( 1123 struct pq3etsec_softc *sc) 1124 { 1125 struct ethercom * const ec = &sc->sc_ec; 1126 struct ifnet * const ifp = &sc->sc_if; 1127 struct ether_multi *enm; 1128 struct ether_multistep step; 1129 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1130 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1131 1132 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1133 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1134 1135 ifp->if_flags &= ~IFF_ALLMULTI; 1136 1137 ETHER_LOCK(ec); 1138 ETHER_FIRST_MULTI(step, ec, enm); 1139 for (u_int i = 0; enm != NULL; ) { 1140 const char *addr = enm->enm_addrlo; 1141 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1142 ifp->if_flags |= IFF_ALLMULTI; 1143 memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1144 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1145 break; 1146 } 1147 if ((sc->sc_rctrl & RCTRL_EMEN) 1148 && i < __arraycount(sc->sc_macaddrs)) { 1149 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1150 } else { 1151 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1152 #if 0 1153 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1154 ether_sprintf(addr), crc, 1155 crc >> crc_shift, 1156 crc >> (crc_shift + 5), 1157 (crc >> crc_shift) & 31, 1158 1 << (((crc >> crc_shift) & 31) ^ 31)); 1159 #endif 1160 /* 1161 * The documentation doesn't completely follow PowerPC 1162 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1163 * is 0x7fa32d9b. By empirical testing, the 1164 * corresponding hash bit is word 3, bit 31 (ppc bit 1165 * order). Since 3 << 31 | 31 is 0x7f, we deduce 1166 * H[0:2] selects the register while H[3:7] selects 1167 * the bit (ppc bit order). 
1168 */ 1169 crc >>= crc_shift; 1170 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 1171 } 1172 ETHER_NEXT_MULTI(step, enm); 1173 } 1174 ETHER_UNLOCK(ec); 1175 for (u_int i = 0; i < 8; i++) { 1176 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 1177 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 1178 #if 0 1179 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 1180 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 1181 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 1182 i, GADDR(i), etsec_read(sc, GADDR(i))); 1183 #endif 1184 } 1185 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 1186 uint64_t macaddr = sc->sc_macaddrs[i]; 1187 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1188 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1189 #if 0 1190 if (macaddr) 1191 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1192 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1193 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1194 #endif 1195 } 1196 } 1197 1198 static int 1199 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1200 { 1201 struct pq3etsec_softc *sc = ifp->if_softc; 1202 struct ifreq * const ifr = data; 1203 const int s = splnet(); 1204 int error; 1205 1206 switch (cmd) { 1207 case SIOCSIFMEDIA: 1208 /* Flow control requires full-duplex mode. */ 1209 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1210 (ifr->ifr_media & IFM_FDX) == 0) 1211 ifr->ifr_media &= ~IFM_ETH_FMASK; 1212 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1213 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1214 /* We can do both TXPAUSE and RXPAUSE. */ 1215 ifr->ifr_media |= 1216 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1217 } 1218 } 1219 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1220 break; 1221 1222 default: 1223 error = ether_ioctl(ifp, cmd, data); 1224 if (error != ENETRESET) 1225 break; 1226 1227 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1228 error = 0; 1229 if (ifp->if_flags & IFF_RUNNING) 1230 pq3etsec_mc_setup(sc); 1231 break; 1232 } 1233 error = pq3etsec_ifinit(ifp); 1234 break; 1235 } 1236 1237 splx(s); 1238 return error; 1239 } 1240 1241 static void 1242 pq3etsec_rxq_desc_presync( 1243 struct pq3etsec_softc *sc, 1244 struct pq3etsec_rxqueue *rxq, 1245 volatile struct rxbd *rxbd, 1246 size_t count) 1247 { 1248 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1249 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1250 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1251 } 1252 1253 static void 1254 pq3etsec_rxq_desc_postsync( 1255 struct pq3etsec_softc *sc, 1256 struct pq3etsec_rxqueue *rxq, 1257 volatile struct rxbd *rxbd, 1258 size_t count) 1259 { 1260 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1261 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1262 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1263 } 1264 1265 static void 1266 pq3etsec_txq_desc_presync( 1267 struct pq3etsec_softc *sc, 1268 struct pq3etsec_txqueue *txq, 1269 volatile struct txbd *txbd, 1270 size_t count) 1271 { 1272 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1273 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1274 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1275 } 1276 1277 static void 1278 pq3etsec_txq_desc_postsync( 1279 struct pq3etsec_softc *sc, 1280 struct pq3etsec_txqueue *txq, 1281 volatile struct txbd *txbd, 1282 size_t count) 1283 { 1284 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1285 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1286 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 
1287 } 1288 1289 static bus_dmamap_t 1290 pq3etsec_mapcache_get( 1291 struct pq3etsec_softc *sc, 1292 struct pq3etsec_mapcache *dmc) 1293 { 1294 KASSERT(dmc->dmc_nmaps > 0); 1295 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 1296 return dmc->dmc_maps[--dmc->dmc_nmaps]; 1297 } 1298 1299 static void 1300 pq3etsec_mapcache_put( 1301 struct pq3etsec_softc *sc, 1302 struct pq3etsec_mapcache *dmc, 1303 bus_dmamap_t map) 1304 { 1305 KASSERT(map != NULL); 1306 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 1307 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 1308 } 1309 1310 static void 1311 pq3etsec_mapcache_destroy( 1312 struct pq3etsec_softc *sc, 1313 struct pq3etsec_mapcache *dmc) 1314 { 1315 const size_t dmc_size = 1316 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]); 1317 1318 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 1319 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 1320 } 1321 kmem_intr_free(dmc, dmc_size); 1322 } 1323 1324 static int 1325 pq3etsec_mapcache_create( 1326 struct pq3etsec_softc *sc, 1327 struct pq3etsec_mapcache **dmc_p, 1328 size_t maxmaps, 1329 size_t maxmapsize, 1330 size_t maxseg) 1331 { 1332 const size_t dmc_size = 1333 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]); 1334 struct pq3etsec_mapcache * const dmc = 1335 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 1336 1337 dmc->dmc_maxmaps = maxmaps; 1338 dmc->dmc_nmaps = maxmaps; 1339 dmc->dmc_maxmapsize = maxmapsize; 1340 dmc->dmc_maxseg = maxseg; 1341 1342 for (u_int i = 0; i < maxmaps; i++) { 1343 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 1344 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 1345 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 1346 if (error) { 1347 aprint_error_dev(sc->sc_dev, 1348 "failed to creat dma map cache " 1349 "entry %u of %zu: %d\n", 1350 i, maxmaps, error); 1351 while (i-- > 0) { 1352 bus_dmamap_destroy(sc->sc_dmat, 1353 dmc->dmc_maps[i]); 1354 } 1355 kmem_intr_free(dmc, dmc_size); 1356 return error; 1357 } 1358 KASSERT(dmc->dmc_maps[i] != NULL); 1359 } 1360 1361 *dmc_p = dmc; 1362 1363 return 0; 1364 } 1365 1366 #if 0 1367 static void 1368 pq3etsec_dmamem_free( 1369 bus_dma_tag_t dmat, 1370 size_t map_size, 1371 bus_dma_segment_t *seg, 1372 bus_dmamap_t map, 1373 void *kvap) 1374 { 1375 bus_dmamap_destroy(dmat, map); 1376 bus_dmamem_unmap(dmat, kvap, map_size); 1377 bus_dmamem_free(dmat, seg, 1); 1378 } 1379 #endif 1380 1381 static int 1382 pq3etsec_dmamem_alloc( 1383 bus_dma_tag_t dmat, 1384 size_t map_size, 1385 bus_dma_segment_t *seg, 1386 bus_dmamap_t *map, 1387 void **kvap) 1388 { 1389 int error; 1390 int nseg; 1391 1392 *kvap = NULL; 1393 *map = NULL; 1394 1395 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0, 1396 seg, 1, &nseg, 0); 1397 if (error) 1398 return error; 1399 1400 KASSERT(nseg == 1); 1401 1402 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 1403 BUS_DMA_COHERENT); 1404 if (error == 0) { 1405 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 1406 map); 1407 if (error == 0) { 1408 error = bus_dmamap_load(dmat, *map, *kvap, map_size, 1409 NULL, 0); 1410 if (error == 0) 1411 return 0; 1412 bus_dmamap_destroy(dmat, *map); 1413 *map = NULL; 1414 } 1415 bus_dmamem_unmap(dmat, *kvap, map_size); 1416 *kvap = NULL; 1417 } 1418 bus_dmamem_free(dmat, seg, nseg); 1419 return 0; 1420 } 1421 1422 static struct mbuf * 1423 pq3etsec_rx_buf_alloc( 1424 struct pq3etsec_softc *sc) 1425 { 1426 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA); 1427 if (m == NULL) { 1428 printf("%s:%d: %s\n", __func__, __LINE__, 
"m_gethdr"); 1429 return NULL; 1430 } 1431 MCLGET(m, M_DONTWAIT); 1432 if ((m->m_flags & M_EXT) == 0) { 1433 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 1434 m_freem(m); 1435 return NULL; 1436 } 1437 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1438 1439 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache); 1440 if (map == NULL) { 1441 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 1442 m_freem(m); 1443 return NULL; 1444 } 1445 M_SETCTX(m, map); 1446 m->m_len = m->m_pkthdr.len = MCLBYTES; 1447 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1448 BUS_DMA_READ | BUS_DMA_NOWAIT); 1449 if (error) { 1450 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n", 1451 error); 1452 M_SETCTX(m, NULL); 1453 m_freem(m); 1454 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1455 return NULL; 1456 } 1457 KASSERT(map->dm_mapsize == MCLBYTES); 1458 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1459 BUS_DMASYNC_PREREAD); 1460 1461 return m; 1462 } 1463 1464 static void 1465 pq3etsec_rx_map_unload( 1466 struct pq3etsec_softc *sc, 1467 struct mbuf *m) 1468 { 1469 KASSERT(m); 1470 for (; m != NULL; m = m->m_next) { 1471 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1472 KASSERT(map); 1473 KASSERT(map->dm_mapsize == MCLBYTES); 1474 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 1475 BUS_DMASYNC_POSTREAD); 1476 bus_dmamap_unload(sc->sc_dmat, map); 1477 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1478 M_SETCTX(m, NULL); 1479 } 1480 } 1481 1482 static bool 1483 pq3etsec_rxq_produce( 1484 struct pq3etsec_softc *sc, 1485 struct pq3etsec_rxqueue *rxq) 1486 { 1487 volatile struct rxbd *producer = rxq->rxq_producer; 1488 #if 0 1489 size_t inuse = rxq->rxq_inuse; 1490 #endif 1491 while (rxq->rxq_inuse < rxq->rxq_threshold) { 1492 struct mbuf *m; 1493 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 1494 if (m == NULL) { 1495 m = pq3etsec_rx_buf_alloc(sc); 1496 if (m == NULL) { 1497 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__); 1498 break; 1499 } 1500 } 1501 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1502 KASSERT(map); 1503 1504 #ifdef ETSEC_DEBUG 1505 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL); 1506 rxq->rxq_mbufs[producer-rxq->rxq_first] = m; 1507 #endif 1508 1509 /* rxbd_len is write-only by the ETSEC */ 1510 producer->rxbd_bufptr = map->dm_segs[0].ds_addr; 1511 membar_producer(); 1512 producer->rxbd_flags |= RXBD_E; 1513 if (__predict_false(rxq->rxq_mhead == NULL)) { 1514 KASSERT(producer == rxq->rxq_consumer); 1515 rxq->rxq_mconsumer = m; 1516 } 1517 *rxq->rxq_mtail = m; 1518 rxq->rxq_mtail = &m->m_next; 1519 m->m_len = MCLBYTES; 1520 m->m_next = NULL; 1521 rxq->rxq_inuse++; 1522 if (++producer == rxq->rxq_last) { 1523 membar_producer(); 1524 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1525 rxq->rxq_last - rxq->rxq_producer); 1526 producer = rxq->rxq_producer = rxq->rxq_first; 1527 } 1528 } 1529 if (producer != rxq->rxq_producer) { 1530 membar_producer(); 1531 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1532 producer - rxq->rxq_producer); 1533 rxq->rxq_producer = producer; 1534 } 1535 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1536 if (qhlt) { 1537 KASSERT(qhlt & rxq->rxq_qmask); 1538 sc->sc_ev_rx_stall.ev_count++; 1539 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1540 } 1541 #if 0 1542 aprint_normal_dev(sc->sc_dev, 1543 "%s: buffers inuse went from %zu to %zu\n", 1544 __func__, inuse, rxq->rxq_inuse); 1545 #endif 1546 return true; 1547 } 1548 1549 static bool 1550 pq3etsec_rx_offload( 1551 struct 
pq3etsec_softc *sc, 1552 struct mbuf *m, 1553 const struct rxfcb *fcb) 1554 { 1555 if (fcb->rxfcb_flags & RXFCB_VLN) { 1556 vlan_set_tag(m, fcb->rxfcb_vlctl); 1557 } 1558 if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1559 || (fcb->rxfcb_flags & (RXFCB_CIP | RXFCB_CTU)) == 0) 1560 return true; 1561 int csum_flags = 0; 1562 if ((fcb->rxfcb_flags & (RXFCB_IP6 | RXFCB_CIP)) == RXFCB_CIP) { 1563 csum_flags |= M_CSUM_IPv4; 1564 if (fcb->rxfcb_flags & RXFCB_EIP) 1565 csum_flags |= M_CSUM_IPv4_BAD; 1566 } 1567 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1568 int ipv_flags; 1569 if (fcb->rxfcb_flags & RXFCB_IP6) 1570 ipv_flags = M_CSUM_TCPv6 | M_CSUM_UDPv6; 1571 else 1572 ipv_flags = M_CSUM_TCPv4 | M_CSUM_UDPv4; 1573 if (fcb->rxfcb_pro == IPPROTO_TCP) { 1574 csum_flags |= (M_CSUM_TCPv4 |M_CSUM_TCPv6) & ipv_flags; 1575 } else { 1576 csum_flags |= (M_CSUM_UDPv4 |M_CSUM_UDPv6) & ipv_flags; 1577 } 1578 if (fcb->rxfcb_flags & RXFCB_ETU) 1579 csum_flags |= M_CSUM_TCP_UDP_BAD; 1580 } 1581 1582 m->m_pkthdr.csum_flags = csum_flags; 1583 return true; 1584 } 1585 1586 static void 1587 pq3etsec_rx_input( 1588 struct pq3etsec_softc *sc, 1589 struct mbuf *m, 1590 uint16_t rxbd_flags) 1591 { 1592 struct ifnet * const ifp = &sc->sc_if; 1593 1594 pq3etsec_rx_map_unload(sc, m); 1595 1596 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1597 struct rxfcb fcb = *mtod(m, struct rxfcb *); 1598 if (!pq3etsec_rx_offload(sc, m, &fcb)) 1599 return; 1600 } 1601 m_adj(m, sc->sc_rx_adjlen); 1602 1603 if (rxbd_flags & RXBD_M) 1604 m->m_flags |= M_PROMISC; 1605 if (rxbd_flags & RXBD_BC) 1606 m->m_flags |= M_BCAST; 1607 if (rxbd_flags & RXBD_MC) 1608 m->m_flags |= M_MCAST; 1609 m->m_flags |= M_HASFCS; 1610 m_set_rcvif(m, &sc->sc_if); 1611 1612 /* 1613 * Let's give it to the network subsystm to deal with. 1614 */ 1615 int s = splnet(); 1616 if_input(ifp, m); 1617 splx(s); 1618 } 1619 1620 static void 1621 pq3etsec_rxq_consume( 1622 struct pq3etsec_softc *sc, 1623 struct pq3etsec_rxqueue *rxq) 1624 { 1625 struct ifnet * const ifp = &sc->sc_if; 1626 volatile struct rxbd *consumer = rxq->rxq_consumer; 1627 size_t rxconsumed = 0; 1628 1629 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1630 1631 for (;;) { 1632 if (consumer == rxq->rxq_producer) { 1633 rxq->rxq_consumer = consumer; 1634 rxq->rxq_inuse -= rxconsumed; 1635 KASSERT(rxq->rxq_inuse == 0); 1636 return; 1637 } 1638 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1639 const uint16_t rxbd_flags = consumer->rxbd_flags; 1640 if (rxbd_flags & RXBD_E) { 1641 rxq->rxq_consumer = consumer; 1642 rxq->rxq_inuse -= rxconsumed; 1643 return; 1644 } 1645 KASSERT(rxq->rxq_mconsumer != NULL); 1646 #ifdef ETSEC_DEBUG 1647 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1648 #endif 1649 #if 0 1650 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1651 __func__, 1652 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1653 mtod(rxq->rxq_mconsumer, int *)[0], 1654 mtod(rxq->rxq_mconsumer, int *)[1], 1655 mtod(rxq->rxq_mconsumer, int *)[2], 1656 mtod(rxq->rxq_mconsumer, int *)[3]); 1657 #endif 1658 /* 1659 * We own this packet again. Clear all flags except wrap. 1660 */ 1661 rxconsumed++; 1662 consumer->rxbd_flags = rxbd_flags & (RXBD_W | RXBD_I); 1663 1664 /* 1665 * If this descriptor has the LAST bit set and no errors, 1666 * it's a valid input packet. 
1667 */ 1668 if ((rxbd_flags & (RXBD_L | RXBD_ERRORS)) == RXBD_L) { 1669 size_t rxbd_len = consumer->rxbd_len; 1670 struct mbuf *m = rxq->rxq_mhead; 1671 struct mbuf *m_last = rxq->rxq_mconsumer; 1672 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1673 rxq->rxq_mtail = &rxq->rxq_mhead; 1674 rxq->rxq_mconsumer = rxq->rxq_mhead; 1675 m_last->m_next = NULL; 1676 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1677 m->m_pkthdr.len = rxbd_len; 1678 pq3etsec_rx_input(sc, m, rxbd_flags); 1679 } else if (rxbd_flags & RXBD_L) { 1680 KASSERT(rxbd_flags & RXBD_ERRORS); 1681 struct mbuf *m; 1682 /* 1683 * We encountered an error, take the mbufs and add 1684 * then to the rx bufcache so we can reuse them. 1685 */ 1686 if_statinc(ifp, if_ierrors); 1687 for (m = rxq->rxq_mhead; 1688 m != rxq->rxq_mconsumer; 1689 m = m->m_next) { 1690 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1691 } 1692 m = rxq->rxq_mconsumer; 1693 if ((rxq->rxq_mhead = m->m_next) == NULL) 1694 rxq->rxq_mtail = &rxq->rxq_mhead; 1695 rxq->rxq_mconsumer = m->m_next; 1696 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1697 } else { 1698 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1699 } 1700 #ifdef ETSEC_DEBUG 1701 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1702 #endif 1703 1704 /* 1705 * Wrap at the last entry! 1706 */ 1707 if (rxbd_flags & RXBD_W) { 1708 KASSERT(consumer + 1 == rxq->rxq_last); 1709 consumer = rxq->rxq_first; 1710 } else { 1711 consumer++; 1712 } 1713 #ifdef ETSEC_DEBUG 1714 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1715 #endif 1716 } 1717 } 1718 1719 static void 1720 pq3etsec_rxq_purge( 1721 struct pq3etsec_softc *sc, 1722 struct pq3etsec_rxqueue *rxq, 1723 bool discard) 1724 { 1725 struct mbuf *m; 1726 1727 if ((m = rxq->rxq_mhead) != NULL) { 1728 #ifdef ETSEC_DEBUG 1729 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1730 #endif 1731 1732 if (discard) { 1733 pq3etsec_rx_map_unload(sc, m); 1734 m_freem(m); 1735 } else { 1736 while (m != NULL) { 1737 struct mbuf *m0 = m->m_next; 1738 m->m_next = NULL; 1739 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1740 m = m0; 1741 } 1742 } 1743 } 1744 1745 rxq->rxq_mconsumer = NULL; 1746 rxq->rxq_mhead = NULL; 1747 rxq->rxq_mtail = &rxq->rxq_mhead; 1748 rxq->rxq_inuse = 0; 1749 } 1750 1751 static void 1752 pq3etsec_rxq_reset( 1753 struct pq3etsec_softc *sc, 1754 struct pq3etsec_rxqueue *rxq) 1755 { 1756 /* 1757 * sync all the descriptors 1758 */ 1759 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1760 rxq->rxq_last - rxq->rxq_first); 1761 1762 /* 1763 * Make sure we own all descriptors in the ring. 1764 */ 1765 volatile struct rxbd *rxbd; 1766 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1767 rxbd->rxbd_flags = RXBD_I; 1768 } 1769 1770 /* 1771 * Last descriptor has the wrap flag. 1772 */ 1773 rxbd->rxbd_flags = RXBD_W | RXBD_I; 1774 1775 /* 1776 * Reset the producer consumer indexes. 
1777 */ 1778 rxq->rxq_consumer = rxq->rxq_first; 1779 rxq->rxq_producer = rxq->rxq_first; 1780 rxq->rxq_inuse = 0; 1781 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1782 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1783 1784 sc->sc_imask |= IEVENT_RXF | IEVENT_BSY; 1785 1786 /* 1787 * Restart the transmit at the first descriptor 1788 */ 1789 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1790 } 1791 1792 static int 1793 pq3etsec_rxq_attach( 1794 struct pq3etsec_softc *sc, 1795 struct pq3etsec_rxqueue *rxq, 1796 u_int qno) 1797 { 1798 size_t map_size = PAGE_SIZE; 1799 size_t desc_count = map_size / sizeof(struct rxbd); 1800 int error; 1801 void *descs; 1802 1803 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1804 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1805 if (error) 1806 return error; 1807 1808 memset(descs, 0, map_size); 1809 rxq->rxq_first = descs; 1810 rxq->rxq_last = rxq->rxq_first + desc_count; 1811 rxq->rxq_consumer = descs; 1812 rxq->rxq_producer = descs; 1813 1814 pq3etsec_rxq_purge(sc, rxq, true); 1815 pq3etsec_rxq_reset(sc, rxq); 1816 1817 rxq->rxq_reg_rbase = RBASEn(qno); 1818 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1819 1820 return 0; 1821 } 1822 1823 static bool 1824 pq3etsec_txq_active_p( 1825 struct pq3etsec_softc * const sc, 1826 struct pq3etsec_txqueue *txq) 1827 { 1828 return !IF_IS_EMPTY(&txq->txq_mbufs); 1829 } 1830 1831 static bool 1832 pq3etsec_txq_fillable_p( 1833 struct pq3etsec_softc * const sc, 1834 struct pq3etsec_txqueue *txq) 1835 { 1836 return txq->txq_free >= txq->txq_threshold; 1837 } 1838 1839 static int 1840 pq3etsec_txq_attach( 1841 struct pq3etsec_softc *sc, 1842 struct pq3etsec_txqueue *txq, 1843 u_int qno) 1844 { 1845 size_t map_size = PAGE_SIZE; 1846 size_t desc_count = map_size / sizeof(struct txbd); 1847 int error; 1848 void *descs; 1849 1850 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1851 &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1852 if (error) 1853 return error; 1854 1855 memset(descs, 0, map_size); 1856 txq->txq_first = descs; 1857 txq->txq_last = txq->txq_first + desc_count; 1858 txq->txq_consumer = descs; 1859 txq->txq_producer = descs; 1860 1861 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1862 1863 txq->txq_reg_tbase = TBASEn(qno); 1864 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1865 1866 pq3etsec_txq_reset(sc, txq); 1867 1868 return 0; 1869 } 1870 1871 static int 1872 pq3etsec_txq_map_load( 1873 struct pq3etsec_softc *sc, 1874 struct pq3etsec_txqueue *txq, 1875 struct mbuf *m) 1876 { 1877 bus_dmamap_t map; 1878 int error; 1879 1880 map = M_GETCTX(m, bus_dmamap_t); 1881 if (map != NULL) 1882 return 0; 1883 1884 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1885 if (map == NULL) 1886 return ENOMEM; 1887 1888 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1889 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1890 if (error) 1891 return error; 1892 1893 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1894 BUS_DMASYNC_PREWRITE); 1895 M_SETCTX(m, map); 1896 return 0; 1897 } 1898 1899 static void 1900 pq3etsec_txq_map_unload( 1901 struct pq3etsec_softc *sc, 1902 struct pq3etsec_txqueue *txq, 1903 struct mbuf *m) 1904 { 1905 KASSERT(m); 1906 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1907 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1908 BUS_DMASYNC_POSTWRITE); 1909 bus_dmamap_unload(sc->sc_dmat, map); 1910 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1911 } 1912 1913 static bool 1914 pq3etsec_txq_produce( 1915 struct pq3etsec_softc *sc, 1916 
static bool
pq3etsec_txq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	volatile struct txbd *producer = txq->txq_producer;
	uint16_t last_flags = TXBD_L;
	uint16_t first_flags = TXBD_R
	    | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);

	/*
	 * If we've produced enough descriptors without consuming any,
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (ETSEC_IC_TX_ENABLED(sc)
	    || txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXBD_I;
	}

#ifdef ETSEC_DEBUG
	KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
#endif
	KASSERT(producer != txq->txq_last);
	producer->txbd_bufptr = map->dm_segs[0].ds_addr;
	producer->txbd_len = map->dm_segs[0].ds_len;

	if (map->dm_nsegs > 1) {
		volatile struct txbd *start = producer + 1;
		size_t count = map->dm_nsegs - 1;
		for (u_int i = 1; i < map->dm_nsegs; i++) {
			if (__predict_false(++producer == txq->txq_last)) {
				producer = txq->txq_first;
				if (start < txq->txq_last) {
					pq3etsec_txq_desc_presync(sc, txq,
					    start, txq->txq_last - start);
					count -= txq->txq_last - start;
				}
				start = txq->txq_first;
			}
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
#endif
			producer->txbd_bufptr = map->dm_segs[i].ds_addr;
			producer->txbd_len = map->dm_segs[i].ds_len;
			producer->txbd_flags = TXBD_R
			    | (producer->txbd_flags & TXBD_W)
			    | (i == map->dm_nsegs - 1 ? last_flags : 0);
#if 0
			printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
			    producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
#endif
		}
		pq3etsec_txq_desc_presync(sc, txq, start, count);
	} else {
		first_flags |= last_flags;
	}

	membar_producer();
	txq->txq_producer->txbd_flags =
	    first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
#if 0
	printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
	    txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
	    txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
#endif
	pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
	KASSERT(producer->txbd_flags & TXBD_L);
#ifdef ETSEC_DEBUG
	txq->txq_lmbufs[producer - txq->txq_first] = m;
#endif

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (++producer == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Restart the transmitter.
	 */
	etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT);	/* W1C */

	return true;
}

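/*
 * If the packet wants checksum offload or carries a VLAN tag, prepend a
 * transmit frame control block (FCB) describing that work and mark the
 * mbuf with M_HASFCB.  If no headroom is available and no mbuf can be
 * allocated for the FCB, fall back to finishing the checksums in
 * software.
 */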
static void
pq3etsec_tx_offload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf **mp)
{
	struct mbuf *m = *mp;
	u_int csum_flags = m->m_pkthdr.csum_flags;
	bool have_vtag;
	uint16_t vtag;

	KASSERT(m->m_flags & M_PKTHDR);

	have_vtag = vlan_has_tag(m);
	vtag = (have_vtag) ? vlan_get_tag(m) : 0;

	/*
	 * Let's see if we are doing any offload first.
	 */
	if (csum_flags == 0 && !have_vtag) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	uint16_t flags = 0;
	if (csum_flags & M_CSUM_IP) {
		flags |= TXFCB_IP
		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
		    | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
	}
	if (have_vtag) {
		flags |= TXFCB_VLN;
	}
	if (flags == 0) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	struct txfcb fcb;
	fcb.txfcb_flags = flags;
	if (csum_flags & M_CSUM_IPv4)
		fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
	else
		fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
	fcb.txfcb_l3os = ETHER_HDR_LEN;
	fcb.txfcb_phcs = 0;
	fcb.txfcb_vlctl = vtag;

#if 0
	printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
	    __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
	    fcb.txfcb_phcs, fcb.txfcb_vlctl);
#endif

	if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
		m->m_data -= sizeof(fcb);
		m->m_len += sizeof(fcb);
	} else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
		memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
		m->m_data = m->m_pktdat;
		m->m_len += sizeof(fcb);
	} else {
		struct mbuf *mn;
		MGET(mn, M_DONTWAIT, m->m_type);
		if (mn == NULL) {
			if (csum_flags & M_CSUM_IP4) {
#ifdef INET
				in_undefer_cksum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP4);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			} else if (csum_flags & M_CSUM_IP6) {
#ifdef INET6
				in6_undefer_cksum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP6);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			}

			m->m_flags &= ~M_HASFCB;
			return;
		}

		m_move_pkthdr(mn, m);
		mn->m_next = m;
		m = mn;
		m_align(m, sizeof(fcb));
		m->m_len = sizeof(fcb);
		*mp = m;
	}
	m->m_pkthdr.len += sizeof(fcb);
	m->m_flags |= M_HASFCB;
	*mtod(m, struct txfcb *) = fcb;
	return;
}

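/*
 * Pull packets from the interface send queue (or retry the packet parked
 * in txq_next), apply any needed offload, load a dmamap, and hand the
 * chain to pq3etsec_txq_produce().  Returns true when the send queue has
 * been drained, false when the transmit ring fills up first.
 */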
static bool
pq3etsec_txq_enqueue(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
			pq3etsec_tx_offload(sc, txq, &m);
		} else {
			txq->txq_next = NULL;
		}
		int error = pq3etsec_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!pq3etsec_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}

static bool
pq3etsec_txq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct txbd *consumer = txq->txq_consumer;
	size_t txfree = 0;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif
	etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);

	for (;;) {
		if (consumer == txq->txq_producer) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
#if 0
			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
			    __func__, txfree, txq->txq_free - txfree, txq->txq_free);
#endif
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
			return true;
		}
		pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
		const uint16_t txbd_flags = consumer->txbd_flags;
		if (txbd_flags & TXBD_R) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
#if 0
			printf("%s: freed %zu descriptors\n",
			    __func__, txfree);
#endif
			return pq3etsec_txq_fillable_p(sc, txq);
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		if (txbd_flags & TXBD_L) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
#ifdef ETSEC_DEBUG
			KASSERTMSG(
			    m == txq->txq_lmbufs[consumer-txq->txq_first],
			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
			    __func__, consumer, consumer - txq->txq_first,
			    txbd_flags, m,
			    &txq->txq_lmbufs[consumer-txq->txq_first],
			    txq->txq_lmbufs[consumer-txq->txq_first]);
#endif
			KASSERT(m);
			pq3etsec_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			if (m->m_flags & M_HASFCB)
				m_adj(m, sizeof(struct txfcb));
			bpf_mtap(ifp, m, BPF_D_OUT);
			net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
			if_statinc_ref(nsr, if_opackets);
			if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
			if (m->m_flags & M_MCAST)
				if_statinc_ref(nsr, if_omcasts);
			if (txbd_flags & TXBD_ERRORS)
				if_statinc_ref(nsr, if_oerrors);
			IF_STAT_PUTREF(ifp);
			m_freem(m);
#ifdef ETSEC_DEBUG
			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
#endif
		} else {
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
#endif
		}

		/*
		 * We own this packet again. Clear all flags except wrap.
		 */
		txfree++;
		//consumer->txbd_flags = txbd_flags & TXBD_W;

		/*
		 * Wrap at the last entry!
		 */
		if (txbd_flags & TXBD_W) {
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}
}

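/*
 * Free every packet still owned by the transmit queue, including a
 * deferred packet left in txq_next.  Must only be called with the
 * transmitter disabled (MACCFG1_TX_EN clear).
 */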
static void
pq3etsec_txq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
#ifdef ETSEC_DEBUG
	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
#endif
}

static void
pq3etsec_txq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	/*
	 * sync all the descriptors
	 */
	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct txbd *txbd;
	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
		txbd->txbd_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txbd->txbd_flags = TXBD_W;

	/*
	 * Reset the producer consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_imask |= IEVENT_TXF | IEVENT_TXE;

	/*
	 * Restart the transmit at the first descriptor
	 */
	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
}

static void
pq3etsec_ifstart(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
		return;
	}

	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	softint_schedule(sc->sc_soft_ih);
}

static void
pq3etsec_tx_error(
	struct pq3etsec_softc * const sc)
{
	struct pq3etsec_txqueue * const txq = &sc->sc_txq;

	pq3etsec_txq_consume(sc, txq);

	if (pq3etsec_txq_fillable_p(sc, txq))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
	if (sc->sc_txerrors
	    & (IEVENT_LC | IEVENT_CRL | IEVENT_XFUN | IEVENT_BABT)) {
	} else if (sc->sc_txerrors & IEVENT_EBERR) {
	}

	if (pq3etsec_txq_active_p(sc, txq))
		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
	if (!pq3etsec_txq_enqueue(sc, txq)) {
		sc->sc_ev_tx_stall.ev_count++;
		sc->sc_if.if_flags |= IFF_OACTIVE;
	}

	sc->sc_txerrors = 0;
}

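/*
 * Hardware transmit interrupt: acknowledge and mask TXF/TXB, then defer
 * the actual completion processing to the soft interrupt handler.
 */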
int
pq3etsec_tx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	mutex_enter(sc->sc_hwlock);

	sc->sc_ev_tx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_TXF | IEVENT_TXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
	    __func__, ievent, etsec_read(sc, IMASK));
#endif

	if (ievent == 0) {
		mutex_exit(sc->sc_hwlock);
		return 0;
	}

	sc->sc_imask &= ~(IEVENT_TXF | IEVENT_TXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);

	mutex_exit(sc->sc_hwlock);

	return 1;
}

int
pq3etsec_rx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	mutex_enter(sc->sc_hwlock);

	sc->sc_ev_rx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_RXF | IEVENT_RXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
	if (ievent == 0) {
		mutex_exit(sc->sc_hwlock);
		return 0;
	}

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
#endif

	sc->sc_imask &= ~(IEVENT_RXF | IEVENT_RXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);

	mutex_exit(sc->sc_hwlock);

	return 1;
}

int
pq3etsec_error_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	mutex_enter(sc->sc_hwlock);

	sc->sc_ev_error_intr.ev_count++;

	for (int rv = 0, soft_flags = 0;; rv = 1) {
		uint32_t ievent = etsec_read(sc, IEVENT);
		ievent &= ~(IEVENT_RXF | IEVENT_RXB | IEVENT_TXF | IEVENT_TXB);
		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
		if (ievent == 0) {
			if (soft_flags) {
				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
				softint_schedule(sc->sc_soft_ih);
			}
			mutex_exit(sc->sc_hwlock);
			return rv;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
		    __func__, ievent, etsec_read(sc, IMASK));
#endif

		if (ievent & (IEVENT_GRSC | IEVENT_GTSC)) {
			sc->sc_imask &= ~(IEVENT_GRSC | IEVENT_GTSC);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(sc);
		}
		if (ievent & (IEVENT_MMRD | IEVENT_MMWR)) {
			sc->sc_imask &= ~(IEVENT_MMRD | IEVENT_MMWR);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(&sc->sc_mii);
		}
		if (ievent & IEVENT_BSY) {
			soft_flags |= SOFT_RXBSY;
			sc->sc_imask &= ~IEVENT_BSY;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
		if (ievent & IEVENT_TXE) {
			soft_flags |= SOFT_TXERROR;
			sc->sc_imask &= ~IEVENT_TXE;
			sc->sc_txerrors |= ievent;
		}
		if (ievent & IEVENT_TXC) {
			sc->sc_ev_tx_pause.ev_count++;
		}
		if (ievent & IEVENT_RXC) {
			sc->sc_ev_rx_pause.ev_count++;
		}
		if (ievent & IEVENT_DPE) {
			soft_flags |= SOFT_RESET;
			sc->sc_imask &= ~IEVENT_DPE;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
	}
}

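/*
 * Soft interrupt handler: performs the work deferred by the hardware
 * interrupt handlers (reset, receive-busy recovery, transmit completion
 * and refill, receive processing, transmit error handling) and then
 * re-enables the corresponding interrupt sources.
 */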
void
pq3etsec_soft_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t imask = 0;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	sc->sc_ev_soft_intr.ev_count++;

	if (soft_flags & SOFT_RESET) {
		int s = splnet();
		pq3etsec_ifinit(ifp);
		splx(s);
		soft_flags = 0;
	}

	if (soft_flags & SOFT_RXBSY) {
		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			imask |= IEVENT_BSY;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}

	if ((soft_flags & SOFT_TXINTR)
	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for. Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
			sc->sc_ev_tx_stall.ev_count++;
			ifp->if_flags |= IFF_OACTIVE;
		} else {
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		imask |= IEVENT_TXF;
	}

	if (soft_flags & (SOFT_RXINTR | SOFT_RXBSY)) {
		/* Let's consume */
		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
		imask |= IEVENT_RXF;
	}

	if (soft_flags & SOFT_TXERROR) {
		pq3etsec_tx_error(sc);
		imask |= IEVENT_TXE;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_imask |= imask;
		etsec_write(sc, IMASK, sc->sc_imask);
		mutex_spin_exit(sc->sc_hwlock);
	} else {
		KASSERT((soft_flags & SOFT_RXBSY) == 0);
	}

	mutex_exit(sc->sc_lock);
}

static void
pq3etsec_mii_tick(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	mutex_enter(sc->sc_lock);
	callout_ack(&sc->sc_mii_callout);
	sc->sc_ev_mii_ticks.ev_count++;
#ifdef DEBUG
	uint64_t now = mftb();
	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
		    __func__, now - sc->sc_mii_last_tick);
		callout_stop(&sc->sc_mii_callout);
	}
#endif
	mii_tick(&sc->sc_mii);
	int s = splnet();
	if (sc->sc_soft_flags & SOFT_RESET)
		softint_schedule(sc->sc_soft_ih);
	splx(s);
	callout_schedule(&sc->sc_mii_callout, hz);
#ifdef DEBUG
	sc->sc_mii_last_tick = now;
#endif
	mutex_exit(sc->sc_lock);
}

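/*
 * Program the RX/TX interrupt-coalescing registers from the sysctl
 * tunable frame-count and timer thresholds; when coalescing is disabled
 * the register is simply cleared.
 */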
static void
pq3etsec_set_ic_rx(struct pq3etsec_softc *sc)
{
	uint32_t reg;

	if (ETSEC_IC_RX_ENABLED(sc)) {
		reg = RXIC_ICEN;
		reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count);
		reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time);
	} else {
		/* Disable RX interrupt coalescing */
		reg = 0;
	}

	etsec_write(sc, RXIC, reg);
}

static void
pq3etsec_set_ic_tx(struct pq3etsec_softc *sc)
{
	uint32_t reg;

	if (ETSEC_IC_TX_ENABLED(sc)) {
		reg = TXIC_ICEN;
		reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count);
		reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time);
	} else {
		/* Disable TX interrupt coalescing */
		reg = 0;
	}

	etsec_write(sc, TXIC, reg);
}

/*
 * sysctl
 */
static int
pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep)
{
	struct sysctlnode node = *rnode;
	struct pq3etsec_softc *sc = rnode->sysctl_data;
	int value = *valuep;
	int error;

	node.sysctl_data = &value;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	if (value < 0 || value > 65535)
		return EINVAL;

	mutex_enter(sc->sc_lock);
	*valuep = value;
	if (valuep == &sc->sc_ic_rx_time)
		pq3etsec_set_ic_rx(sc);
	else
		pq3etsec_set_ic_tx(sc);
	mutex_exit(sc->sc_lock);

	return 0;
}

static int
pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep)
{
	struct sysctlnode node = *rnode;
	struct pq3etsec_softc *sc = rnode->sysctl_data;
	int value = *valuep;
	int error;

	node.sysctl_data = &value;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	if (value < 0 || value > 255)
		return EINVAL;

	mutex_enter(sc->sc_lock);
	*valuep = value;
	if (valuep == &sc->sc_ic_rx_count)
		pq3etsec_set_ic_rx(sc);
	else
		pq3etsec_set_ic_tx(sc);
	mutex_exit(sc->sc_lock);

	return 0;
}

static int
pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_rx_time);
}

static int
pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_rx_count);
}

static int
pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_tx_time);
}

static int
pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_tx_count);
}

static void pq3etsec_sysctl_setup(struct sysctllog **clog,
    struct pq3etsec_softc *sc)
{
	const struct sysctlnode *cnode, *rnode;

	if (sysctl_createv(clog, 0, NULL, &rnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, device_xname(sc->sc_dev),
		SYSCTL_DESCR("TSEC interface"),
		NULL, 0, NULL, 0,
		CTL_HW, CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &rnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "int_coal",
		SYSCTL_DESCR("Interrupt coalescing"),
		NULL, 0, NULL, 0,
		CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "rx_time",
		SYSCTL_DESCR("RX time threshold (0-65535)"),
		pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0,
		CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "rx_count",
		SYSCTL_DESCR("RX frame count threshold (0-255)"),
		pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0,
		CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "tx_time",
		SYSCTL_DESCR("TX time threshold (0-65535)"),
		pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0,
		CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "tx_count",
		SYSCTL_DESCR("TX frame count threshold (0-255)"),
		pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0,
		CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	return;

bad:
	aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n");
}
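/*
 * Illustrative example only: assuming the device attached as tsec0, the
 * nodes created above could be inspected and tuned from userland roughly
 * as follows:
 *
 *	sysctl hw.tsec0.int_coal
 *	sysctl -w hw.tsec0.int_coal.rx_count=16
 *	sysctl -w hw.tsec0.int_coal.rx_time=256
 *
 * The actual node path depends on device_xname(sc->sc_dev).
 */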