/*	$NetBSD: pq3etsec.c,v 1.10 2012/02/21 02:08:55 matt Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
35 */ 36 37 #include "opt_inet.h" 38 39 #include <sys/cdefs.h> 40 41 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.10 2012/02/21 02:08:55 matt Exp $"); 42 43 #include <sys/param.h> 44 #include <sys/cpu.h> 45 #include <sys/device.h> 46 #include <sys/mbuf.h> 47 #include <sys/ioctl.h> 48 #include <sys/intr.h> 49 #include <sys/bus.h> 50 #include <sys/kernel.h> 51 #include <sys/kmem.h> 52 #include <sys/proc.h> 53 #include <sys/atomic.h> 54 #include <sys/callout.h> 55 56 #include <net/if.h> 57 #include <net/if_dl.h> 58 #include <net/if_ether.h> 59 #include <net/if_media.h> 60 61 #include <dev/mii/miivar.h> 62 63 #include "ioconf.h" 64 65 #include <net/bpf.h> 66 67 #ifdef INET 68 #include <netinet/in.h> 69 #include <netinet/in_systm.h> 70 #include <netinet/ip.h> 71 #include <netinet/in_offload.h> 72 #endif /* INET */ 73 #ifdef INET6 74 #include <netinet6/in6.h> 75 #include <netinet/ip6.h> 76 #endif 77 #include <netinet6/in6_offload.h> 78 79 80 #include <powerpc/spr.h> 81 #include <powerpc/booke/spr.h> 82 83 #include <powerpc/booke/cpuvar.h> 84 #include <powerpc/booke/e500var.h> 85 #include <powerpc/booke/e500reg.h> 86 #include <powerpc/booke/etsecreg.h> 87 88 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 89 90 #define ETSEC_MAXTXMBUFS 30 91 #define ETSEC_NTXSEGS 30 92 #define ETSEC_MAXRXMBUFS 511 93 #define ETSEC_MINRXMBUFS 32 94 #define ETSEC_NRXSEGS 1 95 96 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 97 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\ 98 |IFCAP_CSUM_UDPv4_Rx\ 99 |IFCAP_CSUM_TCPv6_Rx\ 100 |IFCAP_CSUM_UDPv6_Rx) 101 102 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 103 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\ 104 |IFCAP_CSUM_UDPv4_Tx\ 105 |IFCAP_CSUM_TCPv6_Tx\ 106 |IFCAP_CSUM_UDPv6_Tx) 107 108 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\ 109 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN) 110 111 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU) 112 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6) 113 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 114 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6) 115 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4) 116 #define M_CSUM_CIP (M_CSUM_IPv4) 117 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 118 119 struct pq3etsec_txqueue { 120 bus_dmamap_t txq_descmap; 121 volatile struct txbd *txq_consumer; 122 volatile struct txbd *txq_producer; 123 volatile struct txbd *txq_first; 124 volatile struct txbd *txq_last; 125 struct ifqueue txq_mbufs; 126 struct mbuf *txq_next; 127 #ifdef ETSEC_DEBUG 128 struct mbuf *txq_lmbufs[512]; 129 #endif 130 uint32_t txq_qmask; 131 uint32_t txq_free; 132 uint32_t txq_threshold; 133 uint32_t txq_lastintr; 134 bus_size_t txq_reg_tbase; 135 bus_dma_segment_t txq_descmap_seg; 136 }; 137 138 struct pq3etsec_rxqueue { 139 bus_dmamap_t rxq_descmap; 140 volatile struct rxbd *rxq_consumer; 141 volatile struct rxbd *rxq_producer; 142 volatile struct rxbd *rxq_first; 143 volatile struct rxbd *rxq_last; 144 struct mbuf *rxq_mhead; 145 struct mbuf **rxq_mtail; 146 struct mbuf *rxq_mconsumer; 147 #ifdef ETSEC_DEBUG 148 struct mbuf *rxq_mbufs[512]; 149 #endif 150 uint32_t rxq_qmask; 151 uint32_t rxq_inuse; 152 uint32_t rxq_threshold; 153 bus_size_t rxq_reg_rbase; 154 bus_size_t rxq_reg_rbptr; 155 bus_dma_segment_t rxq_descmap_seg; 156 }; 157 158 struct pq3etsec_mapcache { 159 u_int dmc_nmaps; 160 u_int dmc_maxseg; 161 u_int dmc_maxmaps; 162 u_int dmc_maxmapsize; 163 bus_dmamap_t dmc_maps[0]; 164 }; 165 166 struct pq3etsec_softc { 167 device_t sc_dev; 
168 struct ethercom sc_ec; 169 #define sc_if sc_ec.ec_if 170 struct mii_data sc_mii; 171 bus_space_tag_t sc_bst; 172 bus_space_handle_t sc_bsh; 173 bus_dma_tag_t sc_dmat; 174 int sc_phy_addr; 175 prop_dictionary_t sc_intrmap; 176 uint32_t sc_intrmask; 177 178 uint32_t sc_soft_flags; 179 #define SOFT_RESET 0x0001 180 #define SOFT_RXINTR 0x0010 181 #define SOFT_RXBSY 0x0020 182 #define SOFT_TXINTR 0x0100 183 #define SOFT_TXERROR 0x0200 184 185 struct pq3etsec_txqueue sc_txq; 186 struct pq3etsec_rxqueue sc_rxq; 187 uint32_t sc_txerrors; 188 uint32_t sc_rxerrors; 189 190 size_t sc_rx_adjlen; 191 192 /* 193 * Copies of various ETSEC registers. 194 */ 195 uint32_t sc_imask; 196 uint32_t sc_maccfg1; 197 uint32_t sc_maccfg2; 198 uint32_t sc_maxfrm; 199 uint32_t sc_ecntrl; 200 uint32_t sc_dmactrl; 201 uint32_t sc_macstnaddr1; 202 uint32_t sc_macstnaddr2; 203 uint32_t sc_tctrl; 204 uint32_t sc_rctrl; 205 uint32_t sc_gaddr[16]; 206 uint64_t sc_macaddrs[15]; 207 208 void *sc_tx_ih; 209 void *sc_rx_ih; 210 void *sc_error_ih; 211 void *sc_soft_ih; 212 213 kmutex_t *sc_lock; 214 215 struct evcnt sc_ev_tx_stall; 216 struct evcnt sc_ev_tx_intr; 217 struct evcnt sc_ev_rx_stall; 218 struct evcnt sc_ev_rx_intr; 219 struct evcnt sc_ev_error_intr; 220 struct evcnt sc_ev_soft_intr; 221 struct evcnt sc_ev_tx_pause; 222 struct evcnt sc_ev_rx_pause; 223 struct evcnt sc_ev_mii_ticks; 224 225 struct callout sc_mii_callout; 226 uint64_t sc_mii_last_tick; 227 228 struct ifqueue sc_rx_bufcache; 229 struct pq3etsec_mapcache *sc_rx_mapcache; 230 struct pq3etsec_mapcache *sc_tx_mapcache; 231 }; 232 233 static int pq3etsec_match(device_t, cfdata_t, void *); 234 static void pq3etsec_attach(device_t, device_t, void *); 235 236 static void pq3etsec_ifstart(struct ifnet *); 237 static void pq3etsec_ifwatchdog(struct ifnet *); 238 static int pq3etsec_ifinit(struct ifnet *); 239 static void pq3etsec_ifstop(struct ifnet *, int); 240 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 241 242 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 243 struct pq3etsec_mapcache **, size_t, size_t, size_t); 244 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 245 struct pq3etsec_mapcache *); 246 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 247 struct pq3etsec_mapcache *); 248 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 249 struct pq3etsec_mapcache *, bus_dmamap_t); 250 251 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 252 struct pq3etsec_txqueue *, u_int); 253 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 254 struct pq3etsec_txqueue *); 255 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 256 struct pq3etsec_txqueue *); 257 static bool pq3etsec_txq_consume(struct pq3etsec_softc *, 258 struct pq3etsec_txqueue *); 259 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 260 struct pq3etsec_txqueue *, struct mbuf *m); 261 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 262 struct pq3etsec_txqueue *); 263 264 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 265 struct pq3etsec_rxqueue *, u_int); 266 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 267 struct pq3etsec_rxqueue *); 268 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 269 struct pq3etsec_rxqueue *, bool); 270 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 271 struct pq3etsec_rxqueue *); 272 273 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 274 275 static void pq3etsec_mii_tick(void *); 276 static int pq3etsec_rx_intr(void *); 
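/*
 * Illustrative sketch, not part of the original driver: configuration
 * registers such as IMASK, RCTRL and MACCFG1 are shadowed in the softc
 * (sc_imask, sc_rctrl, ...), so updates are made to the cached copy and
 * then written back with etsec_write().  A hypothetical helper for the
 * IMASK case could look like the following; the real code open-codes
 * this pattern in the interrupt handlers further below.
 */
#if 0
static void
pq3etsec_imask_update(struct pq3etsec_softc *sc, uint32_t set, uint32_t clr)
{
	/* Update the cached copy first, then push it to the device. */
	sc->sc_imask = (sc->sc_imask & ~clr) | set;
	etsec_write(sc, IMASK, sc->sc_imask);
}
#endif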
277 static int pq3etsec_tx_intr(void *); 278 static int pq3etsec_error_intr(void *); 279 static void pq3etsec_soft_intr(void *); 280 281 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 282 pq3etsec_match, pq3etsec_attach, NULL, NULL); 283 284 static int 285 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 286 { 287 288 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 289 return 0; 290 291 return 1; 292 } 293 294 static inline uint32_t 295 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 296 { 297 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 298 } 299 300 static inline void 301 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 302 { 303 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 304 } 305 306 static int 307 pq3etsec_mii_readreg(device_t self, int phy, int reg) 308 { 309 struct pq3etsec_softc * const sc = device_private(self); 310 uint32_t miimcom = etsec_read(sc, MIIMCOM); 311 312 // int s = splnet(); 313 314 etsec_write(sc, MIIMADD, 315 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 316 317 etsec_write(sc, IEVENT, IEVENT_MMRD); 318 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */ 319 etsec_write(sc, MIIMCOM, MIIMCOM_READ); 320 #if 0 321 sc->sc_imask |= IEVENT_MMRD; 322 etsec_write(sc, IMASK, sc->sc_imask); 323 #endif 324 325 while (etsec_read(sc, MIIMIND) != 0) { 326 delay(1); 327 } 328 int data = etsec_read(sc, MIIMSTAT); 329 330 if (miimcom == MIIMCOM_SCAN) 331 etsec_write(sc, MIIMCOM, miimcom); 332 333 #if 0 334 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n", 335 __func__, phy, reg, data); 336 #endif 337 etsec_write(sc, IEVENT, IEVENT_MMRD); 338 // splx(s); 339 return data; 340 } 341 342 static void 343 pq3etsec_mii_writereg(device_t self, int phy, int reg, int data) 344 { 345 struct pq3etsec_softc * const sc = device_private(self); 346 uint32_t miimcom = etsec_read(sc, MIIMCOM); 347 348 #if 0 349 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n", 350 __func__, phy, reg, data); 351 #endif 352 353 // int s = splnet(); 354 etsec_write(sc, IEVENT, IEVENT_MMWR); 355 etsec_write(sc, MIIMADD, 356 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 357 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */ 358 etsec_write(sc, MIIMCON, data); 359 360 #if 0 361 sc->sc_imask |= IEVENT_MMWR; 362 etsec_write(sc, IMASK, sc->sc_imask); 363 #endif 364 365 int timo = 1000; /* 1ms */ 366 while ((etsec_read(sc, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 367 delay(1); 368 } 369 370 if (miimcom == MIIMCOM_SCAN) 371 etsec_write(sc, MIIMCOM, miimcom); 372 etsec_write(sc, IEVENT, IEVENT_MMWR); 373 // splx(s); 374 } 375 376 static void 377 pq3etsec_mii_statchg(device_t self) 378 { 379 struct pq3etsec_softc * const sc = device_private(self); 380 struct mii_data * const mii = &sc->sc_mii; 381 382 uint32_t maccfg1 = sc->sc_maccfg1; 383 uint32_t maccfg2 = sc->sc_maccfg2; 384 uint32_t ecntrl = sc->sc_ecntrl; 385 386 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW); 387 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD); 388 389 if (sc->sc_mii.mii_media_active & IFM_FDX) { 390 maccfg2 |= MACCFG2_FD; 391 } 392 393 /* 394 * Now deal with the flow control bits. 395 */ 396 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 397 && (mii->mii_media_active & IFM_ETH_FMASK)) { 398 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 399 maccfg1 |= MACCFG1_RX_FLOW; 400 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 401 maccfg1 |= MACCFG1_TX_FLOW; 402 } 403 404 /* 405 * Now deal with the speed. 
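	 * (As the assignments below imply, MACCFG2_IFMODE selects the GMII
	 * interface mode for gigabit and the MII mode otherwise, and in MII
	 * mode ECNTRL_R100M is set for 100Mb/s and clear for 10Mb/s.)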
406 */ 407 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 408 maccfg2 |= MACCFG2_IFMODE_GMII; 409 } else { 410 maccfg2 |= MACCFG2_IFMODE_MII; 411 ecntrl &= ~ECNTRL_R100M; 412 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 413 ecntrl |= ECNTRL_R100M; 414 } 415 } 416 417 /* 418 * If things are different, re-init things. 419 */ 420 if (maccfg1 != sc->sc_maccfg1 421 || maccfg2 != sc->sc_maccfg2 422 || ecntrl != sc->sc_ecntrl) { 423 if (sc->sc_if.if_flags & IFF_RUNNING) 424 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 425 sc->sc_maccfg1 = maccfg1; 426 sc->sc_maccfg2 = maccfg2; 427 sc->sc_ecntrl = ecntrl; 428 } 429 } 430 431 #if 0 432 static void 433 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 434 { 435 struct pq3etsec_softc * const sc = ifp->if_softc; 436 437 mii_pollstat(&sc->sc_mii); 438 ether_mediastatus(ifp, ifmr); 439 ifmr->ifm_status = sc->sc_mii.mii_media_status; 440 ifmr->ifm_active = sc->sc_mii.mii_media_active; 441 } 442 443 static int 444 pq3etsec_mediachange(struct ifnet *ifp) 445 { 446 struct pq3etsec_softc * const sc = ifp->if_softc; 447 448 if ((ifp->if_flags & IFF_UP) == 0) 449 return 0; 450 451 int rv = mii_mediachg(&sc->sc_mii); 452 return (rv == ENXIO) ? 0 : rv; 453 } 454 #endif 455 456 static void 457 pq3etsec_attach(device_t parent, device_t self, void *aux) 458 { 459 struct cpunode_softc * const psc = device_private(parent); 460 struct pq3etsec_softc * const sc = device_private(self); 461 struct cpunode_attach_args * const cna = aux; 462 struct cpunode_locators * const cnl = &cna->cna_locs; 463 cfdata_t cf = device_cfdata(self); 464 int error; 465 466 psc->sc_children |= cna->cna_childmask; 467 sc->sc_dev = self; 468 sc->sc_bst = cna->cna_memt; 469 sc->sc_dmat = &booke_bus_dma_tag; 470 471 /* 472 * If we have a common MDIO bus, if all off instance 1. 473 */ 474 device_t miiself = (cf->cf_flags & 0x100) ? tsec_cd.cd_devs[0] : self; 475 476 /* 477 * See if the phy is in the config file... 478 */ 479 if (cf->cf_flags & 0x3f) { 480 sc->sc_phy_addr = (cf->cf_flags & 0x3f) - 1; 481 } else { 482 unsigned char prop_name[20]; 483 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 484 cnl->cnl_instance); 485 sc->sc_phy_addr = board_info_get_number(prop_name); 486 } 487 if (sc->sc_phy_addr != MII_PHY_ANY) 488 aprint_normal(" phy %d", sc->sc_phy_addr); 489 490 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 491 &sc->sc_bsh); 492 if (error) { 493 aprint_error(": error mapping registers: %d\n", error); 494 return; 495 } 496 497 /* 498 * Assume firmware has aready set the mac address and fetch it 499 * before we reinit it. 
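	 * (The station address registers hold the address byte-reversed, as
	 * the enaddr[] initializer below implies; e.g. 00:04:9f:01:02:03
	 * would read back as MACSTNADDR1=0x0302019f and
	 * MACSTNADDR2=0x04000000.)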
500 */ 501 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 502 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 503 sc->sc_rctrl = RCTRL_DEFAULT; 504 sc->sc_maccfg2 = MACCFG2_DEFAULT; 505 506 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 507 size_t len; 508 const uint8_t *mac_addr = 509 board_info_get_data("tsec-mac-addr-base", &len); 510 KASSERT(len == ETHER_ADDR_LEN); 511 sc->sc_macstnaddr2 = 512 (mac_addr[1] << 24) 513 | (mac_addr[0] << 16); 514 sc->sc_macstnaddr1 = 515 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 516 | (mac_addr[4] << 16) 517 | (mac_addr[3] << 8) 518 | (mac_addr[2] << 0); 519 #if 0 520 aprint_error(": mac-address unknown\n"); 521 return; 522 #endif 523 } 524 525 char enaddr[ETHER_ADDR_LEN] = { 526 [0] = sc->sc_macstnaddr2 >> 16, 527 [1] = sc->sc_macstnaddr2 >> 24, 528 [2] = sc->sc_macstnaddr1 >> 0, 529 [3] = sc->sc_macstnaddr1 >> 8, 530 [4] = sc->sc_macstnaddr1 >> 16, 531 [5] = sc->sc_macstnaddr1 >> 24, 532 }; 533 534 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 535 if (error) { 536 aprint_error(": failed to init rxq: %d\n", error); 537 return; 538 } 539 540 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 541 if (error) { 542 aprint_error(": failed to init txq: %d\n", error); 543 return; 544 } 545 546 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 547 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 548 if (error) { 549 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 550 return; 551 } 552 553 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 554 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 555 if (error) { 556 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 557 return; 558 } 559 560 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 561 pq3etsec_tx_intr, sc); 562 if (sc->sc_tx_ih == NULL) { 563 aprint_error(": failed to establish tx interrupt: %d\n", 564 cnl->cnl_intrs[0]); 565 return; 566 } 567 568 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 569 pq3etsec_rx_intr, sc); 570 if (sc->sc_rx_ih == NULL) { 571 aprint_error(": failed to establish rx interrupt: %d\n", 572 cnl->cnl_intrs[1]); 573 return; 574 } 575 576 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 577 pq3etsec_error_intr, sc); 578 if (sc->sc_error_ih == NULL) { 579 aprint_error(": failed to establish error interrupt: %d\n", 580 cnl->cnl_intrs[2]); 581 return; 582 } 583 584 sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, 585 pq3etsec_soft_intr, sc); 586 if (sc->sc_soft_ih == NULL) { 587 aprint_error(": failed to establish soft interrupt\n"); 588 return; 589 } 590 591 aprint_normal("\n"); 592 593 etsec_write(sc, ATTR, ATTR_DEFAULT); 594 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 595 596 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 597 598 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 599 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 600 601 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 602 ether_sprintf(enaddr)); 603 604 const char * const xname = device_xname(sc->sc_dev); 605 struct ethercom * const ec = &sc->sc_ec; 606 struct ifnet * const ifp = &ec->ec_if; 607 608 ec->ec_mii = &sc->sc_mii; 609 610 sc->sc_mii.mii_ifp = ifp; 611 sc->sc_mii.mii_readreg = pq3etsec_mii_readreg; 612 sc->sc_mii.mii_writereg = pq3etsec_mii_writereg; 613 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg; 614 615 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 616 ether_mediastatus); 617 618 if (sc->sc_phy_addr < 32) { 619 mii_attach(miiself, 
&sc->sc_mii, 0xffffffff, 620 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 621 622 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 623 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 624 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 625 } else { 626 callout_schedule(&sc->sc_mii_callout, hz); 627 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 628 } 629 } else { 630 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); 631 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX); 632 } 633 634 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 635 | ETHERCAP_JUMBO_MTU; 636 637 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 638 ifp->if_softc = sc; 639 ifp->if_capabilities = IFCAP_ETSEC; 640 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 641 ifp->if_ioctl = pq3etsec_ifioctl; 642 ifp->if_start = pq3etsec_ifstart; 643 ifp->if_watchdog = pq3etsec_ifwatchdog; 644 ifp->if_init = pq3etsec_ifinit; 645 ifp->if_stop = pq3etsec_ifstop; 646 IFQ_SET_READY(&ifp->if_snd); 647 648 pq3etsec_ifstop(ifp, true); 649 650 /* 651 * Attach the interface. 652 */ 653 if_attach(ifp); 654 ether_ifattach(ifp, enaddr); 655 656 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC, 657 NULL, xname, "rx stall"); 658 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 659 NULL, xname, "tx stall"); 660 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR, 661 NULL, xname, "tx intr"); 662 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR, 663 NULL, xname, "rx intr"); 664 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR, 665 NULL, xname, "error intr"); 666 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 667 NULL, xname, "soft intr"); 668 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC, 669 NULL, xname, "tx pause"); 670 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC, 671 NULL, xname, "rx pause"); 672 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC, 673 NULL, xname, "mii ticks"); 674 } 675 676 static uint64_t 677 pq3etsec_macaddr_create(const uint8_t *lladdr) 678 { 679 uint64_t macaddr = 0; 680 681 lladdr += ETHER_ADDR_LEN; 682 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) { 683 macaddr = (macaddr << 8) | *--lladdr; 684 } 685 return macaddr << 16; 686 } 687 688 static int 689 pq3etsec_ifinit(struct ifnet *ifp) 690 { 691 struct pq3etsec_softc * const sc = ifp->if_softc; 692 int error = 0; 693 694 KASSERT(!cpu_softintr_p()); 695 696 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES); 697 if (ifp->if_mtu > ETHERMTU_JUMBO) 698 return error; 699 700 KASSERT(ifp->if_flags & IFF_UP); 701 702 /* 703 * Stop the interface (steps 1 to 4 in the Soft Reset and 704 * Reconfigurating Procedure. 705 */ 706 pq3etsec_ifstop(ifp, 0); 707 708 /* 709 * If our frame size has changed (or it's our first time through) 710 * destroy the existing transmit mapcache. 
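	 * (Each tx dmamap in the cache was created with the previous
	 * sc_maxfrm as its maximum size, so a changed frame size needs a
	 * freshly sized cache.)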
711 */ 712 if (sc->sc_tx_mapcache != NULL 713 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 714 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 715 sc->sc_tx_mapcache = NULL; 716 } 717 718 if (sc->sc_tx_mapcache == NULL) { 719 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 720 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 721 if (error) 722 return error; 723 } 724 725 sc->sc_ev_mii_ticks.ev_count++; 726 mii_tick(&sc->sc_mii); 727 728 if (ifp->if_flags & IFF_PROMISC) { 729 sc->sc_rctrl |= RCTRL_PROM; 730 } else { 731 sc->sc_rctrl &= ~RCTRL_PROM; 732 } 733 734 uint32_t rctrl_prsdep = 0; 735 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP); 736 if (VLAN_ATTACHED(&sc->sc_ec)) { 737 sc->sc_rctrl |= RCTRL_VLEX; 738 rctrl_prsdep = RCTRL_PRSDEP_L2; 739 } 740 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 741 sc->sc_rctrl |= RCTRL_IPCSEN; 742 rctrl_prsdep = RCTRL_PRSDEP_L3; 743 } 744 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 745 sc->sc_rctrl |= RCTRL_TUCSEN; 746 rctrl_prsdep = RCTRL_PRSDEP_L4; 747 } 748 sc->sc_rctrl |= rctrl_prsdep; 749 #if 0 750 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP)) 751 aprint_normal_dev(sc->sc_dev, 752 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 753 sc->sc_rctrl, 754 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 755 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 756 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 757 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 758 #endif 759 760 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS); 761 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 762 sc->sc_tctrl |= TCTRL_VLINS; 763 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 764 sc->sc_tctrl |= TCTRL_IPCSEN; 765 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 766 sc->sc_tctrl |= TCTRL_TUCSEN; 767 #if 0 768 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS)) 769 aprint_normal_dev(sc->sc_dev, 770 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 771 sc->sc_tctrl, 772 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 773 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 774 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 775 #endif 776 777 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN); 778 779 const uint64_t macstnaddr = 780 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 781 782 sc->sc_imask = IEVENT_DPE; 783 784 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 785 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 786 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 787 788 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 789 pq3etsec_txq_reset(sc, &sc->sc_txq); 790 791 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 792 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 793 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 794 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 795 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 796 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 797 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 798 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 799 800 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 801 pq3etsec_mc_setup(sc); 802 803 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 804 etsec_write(sc, MRBLR, MCLBYTES); 805 806 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 807 sc->sc_dmactrl |= DMACTRL_DEFAULT; 808 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 809 810 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. 
*/ 811 etsec_write(sc, TQUEUE, TQUEUE_EN0); 812 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC; 813 814 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 815 816 /* 12. Enable receive queues in RQUEUE, */ 817 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0); 818 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC; 819 820 /* and optionally set TOE functionality in RCTRL. */ 821 etsec_write(sc, RCTRL, sc->sc_rctrl); 822 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 823 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 824 sc->sc_rx_adjlen += sizeof(struct rxfcb); 825 826 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 827 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 828 829 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 830 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 831 832 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 833 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS); 834 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 835 836 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 837 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 838 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 839 840 sc->sc_soft_flags = 0; 841 842 etsec_write(sc, IMASK, sc->sc_imask); 843 844 ifp->if_flags |= IFF_RUNNING; 845 846 return error; 847 } 848 849 static void 850 pq3etsec_ifstop(struct ifnet *ifp, int disable) 851 { 852 struct pq3etsec_softc * const sc = ifp->if_softc; 853 854 KASSERT(!cpu_intr_p()); 855 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC; 856 /* 857 * Clear the GTSC and GRSC from the interrupt mask until 858 * we are ready for them. Then clear them from IEVENT, 859 * request the graceful shutdown, and then enable the 860 * GTSC and GRSC bits in the mask. This should cause the 861 * error interrupt to fire which will issue a wakeup to 862 * allow us to resume. 863 */ 864 865 /* 866 * 1. Set GRS/GTS bits in DMACTRL register 867 */ 868 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS; 869 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 870 etsec_write(sc, IEVENT, imask_gsc_mask); 871 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 872 873 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) { 874 /* 875 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 876 */ 877 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 878 879 u_int timo = 1000; 880 uint32_t ievent = etsec_read(sc, IEVENT); 881 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 882 if (--timo == 0) { 883 aprint_error_dev(sc->sc_dev, 884 "WARNING: " 885 "request to stop failed (IEVENT=%#x)\n", 886 ievent); 887 break; 888 } 889 delay(10); 890 ievent = etsec_read(sc, IEVENT); 891 } 892 } 893 894 /* 895 * Now reset the controller. 896 * 897 * 3. Set SOFT_RESET bit in MACCFG1 register 898 * 4. Clear SOFT_RESET bit in MACCFG1 register 899 */ 900 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 901 etsec_write(sc, MACCFG1, 0); 902 etsec_write(sc, IMASK, 0); 903 etsec_write(sc, IEVENT, ~0); 904 sc->sc_imask = 0; 905 ifp->if_flags &= ~IFF_RUNNING; 906 907 uint32_t tbipa = etsec_read(sc, TBIPA); 908 if (tbipa == sc->sc_phy_addr) { 909 aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 910 etsec_write(sc, TBIPA, 0x1f); 911 } 912 uint32_t miimcfg = etsec_read(sc, MIIMCFG); 913 etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 914 etsec_write(sc, MIIMCFG, miimcfg); 915 916 /* 917 * Let's consume any remaing transmitted packets. 
And if we are 918 * disabling the interface, purge ourselves of any untransmitted 919 * packets. But don't consume any received packets, just drop them. 920 * If we aren't disabling the interface, save the mbufs in the 921 * receive queue for reuse. 922 */ 923 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 924 pq3etsec_txq_consume(sc, &sc->sc_txq); 925 if (disable) { 926 pq3etsec_txq_purge(sc, &sc->sc_txq); 927 IF_PURGE(&ifp->if_snd); 928 } 929 } 930 931 static void 932 pq3etsec_ifwatchdog(struct ifnet *ifp) 933 { 934 } 935 936 static void 937 pq3etsec_mc_setup( 938 struct pq3etsec_softc *sc) 939 { 940 struct ethercom * const ec = &sc->sc_ec; 941 struct ifnet * const ifp = &sc->sc_if; 942 struct ether_multi *enm; 943 struct ether_multistep step; 944 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 945 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 946 947 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 948 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 949 950 ifp->if_flags &= ~IFF_ALLMULTI; 951 952 ETHER_FIRST_MULTI(step, ec, enm); 953 for (u_int i = 0; enm != NULL; ) { 954 const char *addr = enm->enm_addrlo; 955 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 956 ifp->if_flags |= IFF_ALLMULTI; 957 memset(gaddr, 0xff, 32 << (crc_shift & 1)); 958 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 959 break; 960 } 961 if ((sc->sc_rctrl & RCTRL_EMEN) 962 && i < __arraycount(sc->sc_macaddrs)) { 963 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 964 } else { 965 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 966 #if 0 967 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 968 ether_sprintf(addr), crc, 969 crc >> crc_shift, 970 crc >> (crc_shift + 5), 971 (crc >> crc_shift) & 31, 972 1 << (((crc >> crc_shift) & 31) ^ 31)); 973 #endif 974 /* 975 * The documentation doesn't completely follow PowerPC 976 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 977 * is 0x7fa32d9b. By empirical testing, the 978 * corresponding hash bit is word 3, bit 31 (ppc bit 979 * order). Since 3 << 31 | 31 is 0x7f, we deduce 980 * H[0:2] selects the register while H[3:7] selects 981 * the bit (ppc bit order). 982 */ 983 crc >>= crc_shift; 984 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 985 } 986 ETHER_NEXT_MULTI(step, enm); 987 } 988 for (u_int i = 0; i < 8; i++) { 989 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 990 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 991 #if 0 992 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 993 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 994 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 995 i, GADDR(i), etsec_read(sc, GADDR(i))); 996 #endif 997 } 998 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 999 uint64_t macaddr = sc->sc_macaddrs[i]; 1000 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1001 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1002 #if 0 1003 if (macaddr) 1004 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1005 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1006 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1007 #endif 1008 } 1009 } 1010 1011 static int 1012 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1013 { 1014 struct pq3etsec_softc *sc = ifp->if_softc; 1015 struct ifreq * const ifr = data; 1016 const int s = splnet(); 1017 int error; 1018 1019 switch (cmd) { 1020 case SIOCSIFMEDIA: 1021 case SIOCGIFMEDIA: 1022 /* Flow control requires full-duplex mode. 
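		 * Pause frames are only defined for full-duplex operation,
		 * so any requested flow-control bits are cleared unless a
		 * fixed full-duplex media is selected; for autonegotiated
		 * media the negotiated pause settings are applied in
		 * pq3etsec_mii_statchg() instead.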
*/ 1023 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1024 (ifr->ifr_media & IFM_FDX) == 0) 1025 ifr->ifr_media &= ~IFM_ETH_FMASK; 1026 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1027 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1028 /* We can do both TXPAUSE and RXPAUSE. */ 1029 ifr->ifr_media |= 1030 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1031 } 1032 } 1033 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1034 break; 1035 1036 default: 1037 error = ether_ioctl(ifp, cmd, data); 1038 if (error != ENETRESET) 1039 break; 1040 1041 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1042 error = 0; 1043 if (ifp->if_flags & IFF_RUNNING) 1044 pq3etsec_mc_setup(sc); 1045 break; 1046 } 1047 error = pq3etsec_ifinit(ifp); 1048 break; 1049 } 1050 1051 splx(s); 1052 return error; 1053 } 1054 1055 static void 1056 pq3etsec_rxq_desc_presync( 1057 struct pq3etsec_softc *sc, 1058 struct pq3etsec_rxqueue *rxq, 1059 volatile struct rxbd *rxbd, 1060 size_t count) 1061 { 1062 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1063 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1064 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1065 } 1066 1067 static void 1068 pq3etsec_rxq_desc_postsync( 1069 struct pq3etsec_softc *sc, 1070 struct pq3etsec_rxqueue *rxq, 1071 volatile struct rxbd *rxbd, 1072 size_t count) 1073 { 1074 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1075 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1076 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1077 } 1078 1079 static void 1080 pq3etsec_txq_desc_presync( 1081 struct pq3etsec_softc *sc, 1082 struct pq3etsec_txqueue *txq, 1083 volatile struct txbd *txbd, 1084 size_t count) 1085 { 1086 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1087 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1088 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1089 } 1090 1091 static void 1092 pq3etsec_txq_desc_postsync( 1093 struct pq3etsec_softc *sc, 1094 struct pq3etsec_txqueue *txq, 1095 volatile struct txbd *txbd, 1096 size_t count) 1097 { 1098 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1099 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1100 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1101 } 1102 1103 static bus_dmamap_t 1104 pq3etsec_mapcache_get( 1105 struct pq3etsec_softc *sc, 1106 struct pq3etsec_mapcache *dmc) 1107 { 1108 KASSERT(dmc->dmc_nmaps > 0); 1109 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 1110 return dmc->dmc_maps[--dmc->dmc_nmaps]; 1111 } 1112 1113 static void 1114 pq3etsec_mapcache_put( 1115 struct pq3etsec_softc *sc, 1116 struct pq3etsec_mapcache *dmc, 1117 bus_dmamap_t map) 1118 { 1119 KASSERT(map != NULL); 1120 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 1121 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 1122 } 1123 1124 static void 1125 pq3etsec_mapcache_destroy( 1126 struct pq3etsec_softc *sc, 1127 struct pq3etsec_mapcache *dmc) 1128 { 1129 const size_t dmc_size = 1130 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]); 1131 1132 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 1133 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 1134 } 1135 kmem_free(dmc, dmc_size); 1136 } 1137 1138 static int 1139 pq3etsec_mapcache_create( 1140 struct pq3etsec_softc *sc, 1141 struct pq3etsec_mapcache **dmc_p, 1142 size_t maxmaps, 1143 size_t maxmapsize, 1144 size_t maxseg) 1145 { 1146 const size_t dmc_size = 1147 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]); 1148 struct pq3etsec_mapcache * const dmc = kmem_zalloc(dmc_size, KM_SLEEP); 1149 1150 
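	/*
	 * dmc_maps[] is a variable-length tail (declared as a zero-length
	 * array in the structure): the kmem_zalloc() above reserved room for
	 * maxmaps entries via offsetof(), and dmc_nmaps counts how many of
	 * those entries currently hold a free map.
	 */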
	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
pq3etsec_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
pq3etsec_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
	    BUS_DMA_COHERENT);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

static void
pq3etsec_rx_map_unload(
	struct pq3etsec_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
BUS_DMASYNC_POSTREAD); 1289 bus_dmamap_unload(sc->sc_dmat, map); 1290 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1291 M_SETCTX(m, NULL); 1292 } 1293 } 1294 1295 static bool 1296 pq3etsec_rxq_produce( 1297 struct pq3etsec_softc *sc, 1298 struct pq3etsec_rxqueue *rxq) 1299 { 1300 volatile struct rxbd *producer = rxq->rxq_producer; 1301 #if 0 1302 size_t inuse = rxq->rxq_inuse; 1303 #endif 1304 while (rxq->rxq_inuse < rxq->rxq_threshold) { 1305 struct mbuf *m; 1306 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 1307 if (m == NULL) { 1308 m = pq3etsec_rx_buf_alloc(sc); 1309 if (m == NULL) { 1310 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__); 1311 break; 1312 } 1313 } 1314 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1315 KASSERT(map); 1316 1317 #ifdef ETSEC_DEBUG 1318 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL); 1319 rxq->rxq_mbufs[producer-rxq->rxq_first] = m; 1320 #endif 1321 1322 /* rxbd_len is write-only by the ETSEC */ 1323 producer->rxbd_bufptr = map->dm_segs[0].ds_addr; 1324 membar_producer(); 1325 producer->rxbd_flags |= RXBD_E; 1326 if (__predict_false(rxq->rxq_mhead == NULL)) { 1327 KASSERT(producer == rxq->rxq_consumer); 1328 rxq->rxq_mconsumer = m; 1329 } 1330 *rxq->rxq_mtail = m; 1331 rxq->rxq_mtail = &m->m_next; 1332 m->m_len = MCLBYTES; 1333 m->m_next = NULL; 1334 rxq->rxq_inuse++; 1335 if (++producer == rxq->rxq_last) { 1336 membar_producer(); 1337 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1338 rxq->rxq_last - rxq->rxq_producer); 1339 producer = rxq->rxq_producer = rxq->rxq_first; 1340 } 1341 } 1342 if (producer != rxq->rxq_producer) { 1343 membar_producer(); 1344 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1345 producer - rxq->rxq_producer); 1346 rxq->rxq_producer = producer; 1347 } 1348 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1349 if (qhlt) { 1350 KASSERT(qhlt & rxq->rxq_qmask); 1351 sc->sc_ev_rx_stall.ev_count++; 1352 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1353 } 1354 #if 0 1355 aprint_normal_dev(sc->sc_dev, 1356 "%s: buffers inuse went from %zu to %zu\n", 1357 __func__, inuse, rxq->rxq_inuse); 1358 #endif 1359 return true; 1360 } 1361 1362 static bool 1363 pq3etsec_rx_offload( 1364 struct pq3etsec_softc *sc, 1365 struct mbuf *m, 1366 const struct rxfcb *fcb) 1367 { 1368 if (fcb->rxfcb_flags & RXFCB_VLN) { 1369 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl, 1370 m_freem(m); return false); 1371 } 1372 if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1373 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0) 1374 return true; 1375 int csum_flags = 0; 1376 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) { 1377 csum_flags |= M_CSUM_IPv4; 1378 if (fcb->rxfcb_flags & RXFCB_EIP) 1379 csum_flags |= M_CSUM_IPv4_BAD; 1380 } 1381 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1382 int ipv_flags; 1383 if (fcb->rxfcb_flags & RXFCB_IP6) 1384 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6; 1385 else 1386 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4; 1387 if (fcb->rxfcb_pro == IPPROTO_TCP) { 1388 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags; 1389 } else { 1390 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags; 1391 } 1392 if (fcb->rxfcb_flags & RXFCB_ETU) 1393 csum_flags |= M_CSUM_TCP_UDP_BAD; 1394 } 1395 1396 m->m_pkthdr.csum_flags = csum_flags; 1397 return true; 1398 } 1399 1400 static void 1401 pq3etsec_rx_input( 1402 struct pq3etsec_softc *sc, 1403 struct mbuf *m, 1404 uint16_t rxbd_flags) 1405 { 1406 struct ifnet * const ifp = &sc->sc_if; 1407 1408 pq3etsec_rx_map_unload(sc, m); 1409 1410 if ((sc->sc_rctrl & 
RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1411 struct rxfcb fcb = *mtod(m, struct rxfcb *); 1412 if (!pq3etsec_rx_offload(sc, m, &fcb)) 1413 return; 1414 } 1415 m_adj(m, sc->sc_rx_adjlen); 1416 1417 if (rxbd_flags & RXBD_M) 1418 m->m_flags |= M_PROMISC; 1419 if (rxbd_flags & RXBD_BC) 1420 m->m_flags |= M_BCAST; 1421 if (rxbd_flags & RXBD_MC) 1422 m->m_flags |= M_MCAST; 1423 m->m_flags |= M_HASFCS; 1424 m->m_pkthdr.rcvif = &sc->sc_if; 1425 1426 ifp->if_ipackets++; 1427 ifp->if_ibytes += m->m_pkthdr.len; 1428 1429 /* 1430 * Let's give it to the network subsystm to deal with. 1431 */ 1432 int s = splnet(); 1433 bpf_mtap(ifp, m); 1434 (*ifp->if_input)(ifp, m); 1435 splx(s); 1436 } 1437 1438 static void 1439 pq3etsec_rxq_consume( 1440 struct pq3etsec_softc *sc, 1441 struct pq3etsec_rxqueue *rxq) 1442 { 1443 struct ifnet * const ifp = &sc->sc_if; 1444 volatile struct rxbd *consumer = rxq->rxq_consumer; 1445 size_t rxconsumed = 0; 1446 1447 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1448 1449 for (;;) { 1450 if (consumer == rxq->rxq_producer) { 1451 rxq->rxq_consumer = consumer; 1452 rxq->rxq_inuse -= rxconsumed; 1453 KASSERT(rxq->rxq_inuse == 0); 1454 return; 1455 } 1456 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1457 const uint16_t rxbd_flags = consumer->rxbd_flags; 1458 if (rxbd_flags & RXBD_E) { 1459 rxq->rxq_consumer = consumer; 1460 rxq->rxq_inuse -= rxconsumed; 1461 return; 1462 } 1463 KASSERT(rxq->rxq_mconsumer != NULL); 1464 #ifdef ETSEC_DEBUG 1465 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1466 #endif 1467 #if 0 1468 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1469 __func__, 1470 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1471 mtod(rxq->rxq_mconsumer, int *)[0], 1472 mtod(rxq->rxq_mconsumer, int *)[1], 1473 mtod(rxq->rxq_mconsumer, int *)[2], 1474 mtod(rxq->rxq_mconsumer, int *)[3]); 1475 #endif 1476 /* 1477 * We own this packet again. Clear all flags except wrap. 1478 */ 1479 rxconsumed++; 1480 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I); 1481 1482 /* 1483 * If this descriptor has the LAST bit set and no errors, 1484 * it's a valid input packet. 1485 */ 1486 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) { 1487 size_t rxbd_len = consumer->rxbd_len; 1488 struct mbuf *m = rxq->rxq_mhead; 1489 struct mbuf *m_last = rxq->rxq_mconsumer; 1490 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1491 rxq->rxq_mtail = &rxq->rxq_mhead; 1492 rxq->rxq_mconsumer = rxq->rxq_mhead; 1493 m_last->m_next = NULL; 1494 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1495 m->m_pkthdr.len = rxbd_len; 1496 pq3etsec_rx_input(sc, m, rxbd_flags); 1497 } else if (rxbd_flags & RXBD_L) { 1498 KASSERT(rxbd_flags & RXBD_ERRORS); 1499 struct mbuf *m; 1500 /* 1501 * We encountered an error, take the mbufs and add 1502 * then to the rx bufcache so we can reuse them. 1503 */ 1504 ifp->if_ierrors++; 1505 for (m = rxq->rxq_mhead; 1506 m != rxq->rxq_mconsumer; 1507 m = m->m_next) { 1508 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1509 } 1510 m = rxq->rxq_mconsumer; 1511 if ((rxq->rxq_mhead = m->m_next) == NULL) 1512 rxq->rxq_mtail = &rxq->rxq_mhead; 1513 rxq->rxq_mconsumer = m->m_next; 1514 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1515 } else { 1516 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1517 } 1518 #ifdef ETSEC_DEBUG 1519 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1520 #endif 1521 1522 /* 1523 * Wrap at the last entry! 
1524 */ 1525 if (rxbd_flags & RXBD_W) { 1526 KASSERT(consumer + 1 == rxq->rxq_last); 1527 consumer = rxq->rxq_first; 1528 } else { 1529 consumer++; 1530 } 1531 #ifdef ETSEC_DEBUG 1532 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1533 #endif 1534 } 1535 } 1536 1537 static void 1538 pq3etsec_rxq_purge( 1539 struct pq3etsec_softc *sc, 1540 struct pq3etsec_rxqueue *rxq, 1541 bool discard) 1542 { 1543 struct mbuf *m; 1544 1545 if ((m = rxq->rxq_mhead) != NULL) { 1546 #ifdef ETSEC_DEBUG 1547 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1548 #endif 1549 1550 if (discard) { 1551 pq3etsec_rx_map_unload(sc, m); 1552 m_freem(m); 1553 } else { 1554 while (m != NULL) { 1555 struct mbuf *m0 = m->m_next; 1556 m->m_next = NULL; 1557 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1558 m = m0; 1559 } 1560 } 1561 1562 } 1563 1564 rxq->rxq_mconsumer = NULL; 1565 rxq->rxq_mhead = NULL; 1566 rxq->rxq_mtail = &rxq->rxq_mhead; 1567 rxq->rxq_inuse = 0; 1568 } 1569 1570 static void 1571 pq3etsec_rxq_reset( 1572 struct pq3etsec_softc *sc, 1573 struct pq3etsec_rxqueue *rxq) 1574 { 1575 /* 1576 * sync all the descriptors 1577 */ 1578 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1579 rxq->rxq_last - rxq->rxq_first); 1580 1581 /* 1582 * Make sure we own all descriptors in the ring. 1583 */ 1584 volatile struct rxbd *rxbd; 1585 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1586 rxbd->rxbd_flags = RXBD_I; 1587 } 1588 1589 /* 1590 * Last descriptor has the wrap flag. 1591 */ 1592 rxbd->rxbd_flags = RXBD_W|RXBD_I; 1593 1594 /* 1595 * Reset the producer consumer indexes. 1596 */ 1597 rxq->rxq_consumer = rxq->rxq_first; 1598 rxq->rxq_producer = rxq->rxq_first; 1599 rxq->rxq_inuse = 0; 1600 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1601 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1602 1603 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY; 1604 1605 /* 1606 * Restart the transmit at the first descriptor 1607 */ 1608 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1609 } 1610 1611 static int 1612 pq3etsec_rxq_attach( 1613 struct pq3etsec_softc *sc, 1614 struct pq3etsec_rxqueue *rxq, 1615 u_int qno) 1616 { 1617 size_t map_size = PAGE_SIZE; 1618 size_t desc_count = map_size / sizeof(struct rxbd); 1619 int error; 1620 void *descs; 1621 1622 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1623 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1624 if (error) 1625 return error; 1626 1627 memset(descs, 0, map_size); 1628 rxq->rxq_first = descs; 1629 rxq->rxq_last = rxq->rxq_first + desc_count; 1630 rxq->rxq_consumer = descs; 1631 rxq->rxq_producer = descs; 1632 1633 pq3etsec_rxq_purge(sc, rxq, true); 1634 pq3etsec_rxq_reset(sc, rxq); 1635 1636 rxq->rxq_reg_rbase = RBASEn(qno); 1637 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1638 1639 return 0; 1640 } 1641 1642 static bool 1643 pq3etsec_txq_active_p( 1644 struct pq3etsec_softc * const sc, 1645 struct pq3etsec_txqueue *txq) 1646 { 1647 return !IF_IS_EMPTY(&txq->txq_mbufs); 1648 } 1649 1650 static bool 1651 pq3etsec_txq_fillable_p( 1652 struct pq3etsec_softc * const sc, 1653 struct pq3etsec_txqueue *txq) 1654 { 1655 return txq->txq_free >= txq->txq_threshold; 1656 } 1657 1658 static int 1659 pq3etsec_txq_attach( 1660 struct pq3etsec_softc *sc, 1661 struct pq3etsec_txqueue *txq, 1662 u_int qno) 1663 { 1664 size_t map_size = PAGE_SIZE; 1665 size_t desc_count = map_size / sizeof(struct txbd); 1666 int error; 1667 void *descs; 1668 1669 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1670 &txq->txq_descmap_seg, 
&txq->txq_descmap, &descs); 1671 if (error) 1672 return error; 1673 1674 memset(descs, 0, map_size); 1675 txq->txq_first = descs; 1676 txq->txq_last = txq->txq_first + desc_count; 1677 txq->txq_consumer = descs; 1678 txq->txq_producer = descs; 1679 1680 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1681 1682 txq->txq_reg_tbase = TBASEn(qno); 1683 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1684 1685 pq3etsec_txq_reset(sc, txq); 1686 1687 return 0; 1688 } 1689 1690 static int 1691 pq3etsec_txq_map_load( 1692 struct pq3etsec_softc *sc, 1693 struct pq3etsec_txqueue *txq, 1694 struct mbuf *m) 1695 { 1696 bus_dmamap_t map; 1697 int error; 1698 1699 map = M_GETCTX(m, bus_dmamap_t); 1700 if (map != NULL) 1701 return 0; 1702 1703 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1704 if (map == NULL) 1705 return ENOMEM; 1706 1707 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1708 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1709 if (error) 1710 return error; 1711 1712 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1713 BUS_DMASYNC_PREWRITE); 1714 M_SETCTX(m, map); 1715 return 0; 1716 } 1717 1718 static void 1719 pq3etsec_txq_map_unload( 1720 struct pq3etsec_softc *sc, 1721 struct pq3etsec_txqueue *txq, 1722 struct mbuf *m) 1723 { 1724 KASSERT(m); 1725 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1726 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1727 BUS_DMASYNC_POSTWRITE); 1728 bus_dmamap_unload(sc->sc_dmat, map); 1729 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1730 } 1731 1732 static bool 1733 pq3etsec_txq_produce( 1734 struct pq3etsec_softc *sc, 1735 struct pq3etsec_txqueue *txq, 1736 struct mbuf *m) 1737 { 1738 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1739 1740 if (map->dm_nsegs > txq->txq_free) 1741 return false; 1742 1743 /* 1744 * TCP Offload flag must be set in the first descriptor. 1745 */ 1746 volatile struct txbd *producer = txq->txq_producer; 1747 uint16_t last_flags = TXBD_L; 1748 uint16_t first_flags = TXBD_R 1749 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1750 1751 /* 1752 * If we've produced enough descriptors without consuming any 1753 * we need to ask for an interrupt to reclaim some. 1754 */ 1755 txq->txq_lastintr += map->dm_nsegs; 1756 if (txq->txq_lastintr >= txq->txq_threshold 1757 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1758 txq->txq_lastintr = 0; 1759 last_flags |= TXBD_I; 1760 } 1761 1762 #ifdef ETSEC_DEBUG 1763 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1764 #endif 1765 KASSERT(producer != txq->txq_last); 1766 producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1767 producer->txbd_len = map->dm_segs[0].ds_len; 1768 1769 if (map->dm_nsegs > 1) { 1770 volatile struct txbd *start = producer + 1; 1771 size_t count = map->dm_nsegs - 1; 1772 for (u_int i = 1; i < map->dm_nsegs; i++) { 1773 if (__predict_false(++producer == txq->txq_last)) { 1774 producer = txq->txq_first; 1775 if (start < txq->txq_last) { 1776 pq3etsec_txq_desc_presync(sc, txq, 1777 start, txq->txq_last - start); 1778 count -= txq->txq_last - start; 1779 } 1780 start = txq->txq_first; 1781 } 1782 #ifdef ETSEC_DEBUG 1783 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1784 #endif 1785 producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1786 producer->txbd_len = map->dm_segs[i].ds_len; 1787 producer->txbd_flags = TXBD_R 1788 | (producer->txbd_flags & TXBD_W) 1789 | (i == map->dm_nsegs - 1 ? 
last_flags : 0); 1790 #if 0 1791 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1792 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1793 #endif 1794 } 1795 pq3etsec_txq_desc_presync(sc, txq, start, count); 1796 } else { 1797 first_flags |= last_flags; 1798 } 1799 1800 membar_producer(); 1801 txq->txq_producer->txbd_flags = 1802 first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1803 #if 0 1804 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1805 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1806 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1807 #endif 1808 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1809 1810 /* 1811 * Reduce free count by the number of segments we consumed. 1812 */ 1813 txq->txq_free -= map->dm_nsegs; 1814 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1815 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0); 1816 KASSERT(producer->txbd_flags & TXBD_L); 1817 #ifdef ETSEC_DEBUG 1818 txq->txq_lmbufs[producer - txq->txq_first] = m; 1819 #endif 1820 1821 #if 0 1822 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n", 1823 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1824 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1825 #endif 1826 1827 if (++producer == txq->txq_last) 1828 txq->txq_producer = txq->txq_first; 1829 else 1830 txq->txq_producer = producer; 1831 IF_ENQUEUE(&txq->txq_mbufs, m); 1832 1833 /* 1834 * Restart the transmitter. 1835 */ 1836 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */ 1837 1838 return true; 1839 } 1840 1841 static void 1842 pq3etsec_tx_offload( 1843 struct pq3etsec_softc *sc, 1844 struct pq3etsec_txqueue *txq, 1845 struct mbuf **mp) 1846 { 1847 struct mbuf *m = *mp; 1848 u_int csum_flags = m->m_pkthdr.csum_flags; 1849 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m); 1850 1851 KASSERT(m->m_flags & M_PKTHDR); 1852 1853 /* 1854 * Let see if we are doing any offload first. 1855 */ 1856 if (csum_flags == 0 && vtag == 0) { 1857 m->m_flags &= ~M_HASFCB; 1858 return; 1859 } 1860 1861 uint16_t flags = 0; 1862 if (csum_flags & M_CSUM_IP) { 1863 flags |= TXFCB_IP 1864 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0) 1865 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0) 1866 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0) 1867 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0) 1868 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0); 1869 } 1870 if (vtag) { 1871 flags |= TXFCB_VLN; 1872 } 1873 if (flags == 0) { 1874 m->m_flags &= ~M_HASFCB; 1875 return; 1876 } 1877 1878 struct txfcb fcb; 1879 fcb.txfcb_flags = flags; 1880 if (csum_flags & M_CSUM_IPv4) 1881 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 1882 else 1883 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data); 1884 fcb.txfcb_l3os = ETHER_HDR_LEN; 1885 fcb.txfcb_phcs = 0; 1886 fcb.txfcb_vlctl = vtag ? 
VLAN_TAG_VALUE(vtag) & 0xffff : 0; 1887 1888 #if 0 1889 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n", 1890 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 1891 fcb.txfcb_phcs, fcb.txfcb_vlctl); 1892 #endif 1893 1894 if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 1895 m->m_data -= sizeof(fcb); 1896 m->m_len += sizeof(fcb); 1897 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 1898 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 1899 m->m_data = m->m_pktdat; 1900 m->m_len += sizeof(fcb); 1901 } else { 1902 struct mbuf *mn; 1903 MGET(mn, M_DONTWAIT, m->m_type); 1904 if (mn == NULL) { 1905 if (csum_flags & M_CSUM_IP4) { 1906 #ifdef INET 1907 ip_undefer_csum(m, ETHER_HDR_LEN, 1908 csum_flags & M_CSUM_IP4); 1909 #else 1910 panic("%s: impossible M_CSUM flags %#x", 1911 device_xname(sc->sc_dev), csum_flags); 1912 #endif 1913 } else if (csum_flags & M_CSUM_IP6) { 1914 #ifdef INET6 1915 ip6_undefer_csum(m, ETHER_HDR_LEN, 1916 csum_flags & M_CSUM_IP6); 1917 #else 1918 panic("%s: impossible M_CSUM flags %#x", 1919 device_xname(sc->sc_dev), csum_flags); 1920 #endif 1921 } else if (vtag) { 1922 } 1923 1924 m->m_flags &= ~M_HASFCB; 1925 return; 1926 } 1927 1928 M_MOVE_PKTHDR(mn, m); 1929 mn->m_next = m; 1930 m = mn; 1931 MH_ALIGN(m, sizeof(fcb)); 1932 m->m_len = sizeof(fcb); 1933 *mp = m; 1934 } 1935 m->m_pkthdr.len += sizeof(fcb); 1936 m->m_flags |= M_HASFCB; 1937 *mtod(m, struct txfcb *) = fcb; 1938 return; 1939 } 1940 1941 static bool 1942 pq3etsec_txq_enqueue( 1943 struct pq3etsec_softc *sc, 1944 struct pq3etsec_txqueue *txq) 1945 { 1946 for (;;) { 1947 if (IF_QFULL(&txq->txq_mbufs)) 1948 return false; 1949 struct mbuf *m = txq->txq_next; 1950 if (m == NULL) { 1951 int s = splnet(); 1952 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1953 splx(s); 1954 if (m == NULL) 1955 return true; 1956 M_SETCTX(m, NULL); 1957 pq3etsec_tx_offload(sc, txq, &m); 1958 } else { 1959 txq->txq_next = NULL; 1960 } 1961 int error = pq3etsec_txq_map_load(sc, txq, m); 1962 if (error) { 1963 aprint_error_dev(sc->sc_dev, 1964 "discarded packet due to " 1965 "dmamap load failure: %d\n", error); 1966 m_freem(m); 1967 continue; 1968 } 1969 KASSERT(txq->txq_next == NULL); 1970 if (!pq3etsec_txq_produce(sc, txq, m)) { 1971 txq->txq_next = m; 1972 return false; 1973 } 1974 KASSERT(txq->txq_next == NULL); 1975 } 1976 } 1977 1978 static bool 1979 pq3etsec_txq_consume( 1980 struct pq3etsec_softc *sc, 1981 struct pq3etsec_txqueue *txq) 1982 { 1983 struct ifnet * const ifp = &sc->sc_if; 1984 volatile struct txbd *consumer = txq->txq_consumer; 1985 size_t txfree = 0; 1986 1987 #if 0 1988 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 1989 #endif 1990 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 1991 1992 for (;;) { 1993 if (consumer == txq->txq_producer) { 1994 txq->txq_consumer = consumer; 1995 txq->txq_free += txfree; 1996 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 1997 #if 0 1998 printf("%s: empty: freed %zu descriptors going form %zu to %zu\n", 1999 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 2000 #endif 2001 KASSERT(txq->txq_lastintr == 0); 2002 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 2003 return true; 2004 } 2005 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2006 const uint16_t txbd_flags = consumer->txbd_flags; 2007 if (txbd_flags & TXBD_R) { 2008 txq->txq_consumer = consumer; 2009 txq->txq_free += txfree; 2010 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2011 #if 0 2012 printf("%s: 
2013 			    __func__, txfree);
2014 #endif
2015 			return pq3etsec_txq_fillable_p(sc, txq);
2016 		}
2017 
2018 		/*
2019 		 * If this is the last descriptor in the chain, get the
2020 		 * mbuf, free its dmamap, and free the mbuf chain itself.
2021 		 */
2022 		if (txbd_flags & TXBD_L) {
2023 			struct mbuf *m;
2024 
2025 			IF_DEQUEUE(&txq->txq_mbufs, m);
2026 #ifdef ETSEC_DEBUG
2027 			KASSERTMSG(
2028 			    m == txq->txq_lmbufs[consumer-txq->txq_first],
2029 			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
2030 			    __func__, consumer, consumer - txq->txq_first,
2031 			    txbd_flags, m,
2032 			    &txq->txq_lmbufs[consumer-txq->txq_first],
2033 			    txq->txq_lmbufs[consumer-txq->txq_first]);
2034 #endif
2035 			KASSERT(m);
2036 			pq3etsec_txq_map_unload(sc, txq, m);
2037 #if 0
2038 			printf("%s: mbuf %p: consumed a %u byte packet\n",
2039 			    __func__, m, m->m_pkthdr.len);
2040 #endif
2041 			if (m->m_flags & M_HASFCB)
2042 				m_adj(m, sizeof(struct txfcb));
2043 			ifp->if_opackets++;
2044 			ifp->if_obytes += m->m_pkthdr.len;
2045 			if (m->m_flags & M_MCAST)
2046 				ifp->if_omcasts++;
2047 			if (txbd_flags & TXBD_ERRORS)
2048 				ifp->if_oerrors++;
2049 			m_freem(m);
2050 #ifdef ETSEC_DEBUG
2051 			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2052 #endif
2053 		} else {
2054 #ifdef ETSEC_DEBUG
2055 			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2056 #endif
2057 		}
2058 
2059 		/*
2060 		 * We own this packet again. Clear all flags except wrap.
2061 		 */
2062 		txfree++;
2063 		//consumer->txbd_flags = txbd_flags & TXBD_W;
2064 
2065 		/*
2066 		 * Wrap at the last entry!
2067 		 */
2068 		if (txbd_flags & TXBD_W) {
2069 			KASSERT(consumer + 1 == txq->txq_last);
2070 			consumer = txq->txq_first;
2071 		} else {
2072 			consumer++;
2073 			KASSERT(consumer < txq->txq_last);
2074 		}
2075 	}
2076 }
2077 
2078 static void
2079 pq3etsec_txq_purge(
2080 	struct pq3etsec_softc *sc,
2081 	struct pq3etsec_txqueue *txq)
2082 {
2083 	struct mbuf *m;
2084 	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2085 
2086 	for (;;) {
2087 		IF_DEQUEUE(&txq->txq_mbufs, m);
2088 		if (m == NULL)
2089 			break;
2090 		pq3etsec_txq_map_unload(sc, txq, m);
2091 		m_freem(m);
2092 	}
2093 	if ((m = txq->txq_next) != NULL) {
2094 		txq->txq_next = NULL;
2095 		pq3etsec_txq_map_unload(sc, txq, m);
2096 		m_freem(m);
2097 	}
2098 #ifdef ETSEC_DEBUG
2099 	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2100 #endif
2101 }
2102 
2103 static void
2104 pq3etsec_txq_reset(
2105 	struct pq3etsec_softc *sc,
2106 	struct pq3etsec_txqueue *txq)
2107 {
2108 	/*
2109 	 * Sync all the descriptors.
2110 	 */
2111 	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2112 	    txq->txq_last - txq->txq_first);
2113 
2114 	/*
2115 	 * Make sure we own all descriptors in the ring.
2116 	 */
2117 	volatile struct txbd *txbd;
2118 	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2119 		txbd->txbd_flags = 0;
2120 	}
2121 
2122 	/*
2123 	 * Last descriptor has the wrap flag.
2124 	 */
2125 	txbd->txbd_flags = TXBD_W;
2126 
2127 	/*
2128 	 * Reset the producer and consumer indexes.
2129 	 */
2130 	txq->txq_consumer = txq->txq_first;
2131 	txq->txq_producer = txq->txq_first;
2132 	txq->txq_free = txq->txq_last - txq->txq_first - 1;
2133 	txq->txq_threshold = txq->txq_free / 2;
2134 	txq->txq_lastintr = 0;
2135 
2136 	/*
2137 	 * What do we want to get interrupted on?
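	 * Transmit-frame (TXF) completions are needed so that
	 * pq3etsec_txq_consume() can reclaim descriptors, and transmit
	 * errors (TXE) are routed through pq3etsec_error_intr() to
	 * pq3etsec_tx_error().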
2138 	 */
2139 	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;
2140 
2141 	/*
2142 	 * Restart the transmit at the first descriptor
2143 	 */
2144 	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2145 }
2146 
2147 static void
2148 pq3etsec_ifstart(struct ifnet *ifp)
2149 {
2150 	struct pq3etsec_softc * const sc = ifp->if_softc;
2151 
2152 	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2153 	softint_schedule(sc->sc_soft_ih);
2154 }
2155 
2156 static void
2157 pq3etsec_tx_error(
2158 	struct pq3etsec_softc * const sc)
2159 {
2160 	struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2161 
2162 	pq3etsec_txq_consume(sc, txq);
2163 
2164 	if (pq3etsec_txq_fillable_p(sc, txq))
2165 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
2166 	if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
2167 	} else if (sc->sc_txerrors & IEVENT_EBERR) {
2168 	}
2169 
2170 	if (pq3etsec_txq_active_p(sc, txq))
2171 		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2172 	if (!pq3etsec_txq_enqueue(sc, txq)) {
2173 		sc->sc_ev_tx_stall.ev_count++;
2174 		sc->sc_if.if_flags |= IFF_OACTIVE;
2175 	}
2176 
2177 	sc->sc_txerrors = 0;
2178 }
2179 
2180 int
2181 pq3etsec_tx_intr(void *arg)
2182 {
2183 	struct pq3etsec_softc * const sc = arg;
2184 
2185 	sc->sc_ev_tx_intr.ev_count++;
2186 
2187 	uint32_t ievent = etsec_read(sc, IEVENT);
2188 	ievent &= IEVENT_TXF|IEVENT_TXB;
2189 	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
2190 
2191 #if 0
2192 	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2193 	    __func__, ievent, etsec_read(sc, IMASK));
2194 #endif
2195 
2196 	if (ievent == 0)
2197 		return 0;
2198 
2199 	sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
2200 	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2201 	etsec_write(sc, IMASK, sc->sc_imask);
2202 	softint_schedule(sc->sc_soft_ih);
2203 	return 1;
2204 }
2205 
2206 int
2207 pq3etsec_rx_intr(void *arg)
2208 {
2209 	struct pq3etsec_softc * const sc = arg;
2210 
2211 	sc->sc_ev_rx_intr.ev_count++;
2212 
2213 	uint32_t ievent = etsec_read(sc, IEVENT);
2214 	ievent &= IEVENT_RXF|IEVENT_RXB;
2215 	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
2216 	if (ievent == 0)
2217 		return 0;
2218 
2219 #if 0
2220 	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2221 #endif
2222 
2223 	sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
2224 	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2225 	etsec_write(sc, IMASK, sc->sc_imask);
2226 	softint_schedule(sc->sc_soft_ih);
2227 	return 1;
2228 }
2229 
2230 int
2231 pq3etsec_error_intr(void *arg)
2232 {
2233 	struct pq3etsec_softc * const sc = arg;
2234 
2235 	sc->sc_ev_error_intr.ev_count++;
2236 
2237 	for (int rv = 0, soft_flags = 0;; rv = 1) {
2238 		uint32_t ievent = etsec_read(sc, IEVENT);
2239 		ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
2240 		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
2241 		if (ievent == 0) {
2242 			if (soft_flags) {
2243 				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2244 				softint_schedule(sc->sc_soft_ih);
2245 			}
2246 			return rv;
2247 		}
2248 #if 0
2249 		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2250 		    __func__, ievent, etsec_read(sc, IMASK));
2251 #endif
2252 
2253 		if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
2254 			sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
2255 			etsec_write(sc, IMASK, sc->sc_imask);
2256 			wakeup(sc);
2257 		}
2258 		if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
2259 			sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
2260 			etsec_write(sc, IMASK, sc->sc_imask);
2261 			wakeup(&sc->sc_mii);
2262 		}
2263 		if (ievent & IEVENT_BSY) {
2264 			soft_flags |= SOFT_RXBSY;
2265 			sc->sc_imask &= ~IEVENT_BSY;
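			/*
			 * BSY means a frame was received while no receive
			 * buffer was available.  Keep it masked for now;
			 * the soft interrupt (SOFT_RXBSY) will grow the
			 * receive threshold and re-enable BSY only if the
			 * ring still has room to grow.
			 */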
2266 			etsec_write(sc, IMASK, sc->sc_imask);
2267 		}
2268 		if (ievent & IEVENT_TXE) {
2269 			soft_flags |= SOFT_TXERROR;
2270 			sc->sc_imask &= ~IEVENT_TXE;
2271 			sc->sc_txerrors |= ievent;
2272 		}
2273 		if (ievent & IEVENT_TXC) {
2274 			sc->sc_ev_tx_pause.ev_count++;
2275 		}
2276 		if (ievent & IEVENT_RXC) {
2277 			sc->sc_ev_rx_pause.ev_count++;
2278 		}
2279 		if (ievent & IEVENT_DPE) {
2280 			soft_flags |= SOFT_RESET;
2281 			sc->sc_imask &= ~IEVENT_DPE;
2282 			etsec_write(sc, IMASK, sc->sc_imask);
2283 		}
2284 	}
2285 }
2286 
2287 void
2288 pq3etsec_soft_intr(void *arg)
2289 {
2290 	struct pq3etsec_softc * const sc = arg;
2291 	struct ifnet * const ifp = &sc->sc_if;
2292 
2293 	mutex_enter(sc->sc_lock);
2294 
2295 	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2296 
2297 	sc->sc_ev_soft_intr.ev_count++;
2298 
2299 	if (soft_flags & SOFT_RESET) {
2300 		int s = splnet();
2301 		pq3etsec_ifinit(ifp);
2302 		splx(s);
2303 		soft_flags = 0;
2304 	}
2305 
2306 	if (soft_flags & SOFT_RXBSY) {
2307 		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2308 		size_t threshold = 5 * rxq->rxq_threshold / 4;
2309 		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2310 			threshold = rxq->rxq_last - rxq->rxq_first - 1;
2311 		} else {
2312 			sc->sc_imask |= IEVENT_BSY;
2313 		}
2314 		aprint_normal_dev(sc->sc_dev,
2315 		    "increasing receive buffers from %zu to %zu\n",
2316 		    rxq->rxq_threshold, threshold);
2317 		rxq->rxq_threshold = threshold;
2318 	}
2319 
2320 	if ((soft_flags & SOFT_TXINTR)
2321 	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2322 		/*
2323 		 * Let's do what we came here for.  Consume transmitted
2324 		 * packets off the transmit ring.
2325 		 */
2326 		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2327 		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2328 			sc->sc_ev_tx_stall.ev_count++;
2329 			ifp->if_flags |= IFF_OACTIVE;
2330 		} else {
2331 			ifp->if_flags &= ~IFF_OACTIVE;
2332 		}
2333 		sc->sc_imask |= IEVENT_TXF;
2334 	}
2335 
2336 	if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
2337 		/*
2338 		 * Let's consume any packets sitting on the receive ring.
2339 		 */
2340 		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2341 		sc->sc_imask |= IEVENT_RXF;
2342 	}
2343 
2344 	if (soft_flags & SOFT_TXERROR) {
2345 		pq3etsec_tx_error(sc);
2346 		sc->sc_imask |= IEVENT_TXE;
2347 	}
2348 
2349 	if (ifp->if_flags & IFF_RUNNING) {
2350 		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2351 		etsec_write(sc, IMASK, sc->sc_imask);
2352 	} else {
2353 		KASSERT((soft_flags & SOFT_RXBSY) == 0);
2354 	}
2355 
2356 	mutex_exit(sc->sc_lock);
2357 }
2358 
2359 static void
2360 pq3etsec_mii_tick(void *arg)
2361 {
2362 	struct pq3etsec_softc * const sc = arg;
2363 	mutex_enter(sc->sc_lock);
2364 	callout_ack(&sc->sc_mii_callout);
2365 	sc->sc_ev_mii_ticks.ev_count++;
2366 #ifdef DEBUG
2367 	uint64_t now = mftb();
2368 	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2369 		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2370 		    __func__, now - sc->sc_mii_last_tick);
2371 		callout_stop(&sc->sc_mii_callout);
2372 	}
2373 #endif
2374 	mii_tick(&sc->sc_mii);
2375 	int s = splnet();
2376 	if (sc->sc_soft_flags & SOFT_RESET)
2377 		softint_schedule(sc->sc_soft_ih);
2378 	splx(s);
2379 	callout_schedule(&sc->sc_mii_callout, hz);
2380 #ifdef DEBUG
2381 	sc->sc_mii_last_tick = now;
2382 #endif
2383 	mutex_exit(sc->sc_lock);
2384 }
2385 
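/*
 * The hard-interrupt handlers above all share one pattern: acknowledge the
 * events by writing them back to IEVENT (write-1-to-clear), drop them from
 * the interrupt mask so they stay quiet, note the pending work in
 * sc_soft_flags, and schedule the soft interrupt, which re-enables the
 * events once the work is done.  A minimal sketch of that pattern for a
 * hypothetical event bit IEVENT_FOO with a matching SOFT_FOO flag (neither
 * exists in this driver) would look like the disabled code below.
 */
#if 0
static int
pq3etsec_foo_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	/* Fetch and acknowledge only the event we care about. */
	uint32_t ievent = etsec_read(sc, IEVENT) & IEVENT_FOO;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
	if (ievent == 0)
		return 0;			/* not our interrupt */

	/* Mask the event until the soft interrupt has dealt with it. */
	sc->sc_imask &= ~IEVENT_FOO;
	etsec_write(sc, IMASK, sc->sc_imask);

	/* Hand the work off to pq3etsec_soft_intr(). */
	atomic_or_uint(&sc->sc_soft_flags, SOFT_FOO);
	softint_schedule(sc->sc_soft_ih);
	return 1;
}
#endif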