1 /* $NetBSD: pq3etsec.c,v 1.9 2011/10/13 19:53:30 matt Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "opt_inet.h" 38 39 #include <sys/cdefs.h> 40 41 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.9 2011/10/13 19:53:30 matt Exp $"); 42 43 #include <sys/param.h> 44 #include <sys/cpu.h> 45 #include <sys/device.h> 46 #include <sys/mbuf.h> 47 #include <sys/ioctl.h> 48 #include <sys/intr.h> 49 #include <sys/bus.h> 50 #include <sys/kernel.h> 51 #include <sys/kmem.h> 52 #include <sys/proc.h> 53 #include <sys/atomic.h> 54 #include <sys/callout.h> 55 56 #include <net/if.h> 57 #include <net/if_dl.h> 58 #include <net/if_ether.h> 59 #include <net/if_media.h> 60 61 #include <dev/mii/miivar.h> 62 63 #include "ioconf.h" 64 65 #include <net/bpf.h> 66 67 #ifdef INET 68 #include <netinet/in.h> 69 #include <netinet/in_systm.h> 70 #include <netinet/ip.h> 71 #include <netinet/in_offload.h> 72 #endif /* INET */ 73 #ifdef INET6 74 #include <netinet6/in6.h> 75 #include <netinet/ip6.h> 76 #endif 77 #include <netinet6/in6_offload.h> 78 79 80 #include <powerpc/spr.h> 81 #include <powerpc/booke/spr.h> 82 83 #include <powerpc/booke/cpuvar.h> 84 #include <powerpc/booke/e500var.h> 85 #include <powerpc/booke/e500reg.h> 86 #include <powerpc/booke/etsecreg.h> 87 88 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 89 90 #define ETSEC_MAXTXMBUFS 30 91 #define ETSEC_NTXSEGS 30 92 #define ETSEC_MAXRXMBUFS 511 93 #define ETSEC_MINRXMBUFS 32 94 #define ETSEC_NRXSEGS 1 95 96 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 97 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\ 98 |IFCAP_CSUM_UDPv4_Rx\ 99 |IFCAP_CSUM_TCPv6_Rx\ 100 |IFCAP_CSUM_UDPv6_Rx) 101 102 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 103 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\ 104 |IFCAP_CSUM_UDPv4_Tx\ 105 |IFCAP_CSUM_TCPv6_Tx\ 106 |IFCAP_CSUM_UDPv6_Tx) 107 108 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\ 109 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN) 110 111 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU) 112 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6) 113 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 114 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6) 115 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4) 116 #define M_CSUM_CIP (M_CSUM_IPv4) 117 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 118 119 struct pq3etsec_txqueue { 120 bus_dmamap_t txq_descmap; 121 volatile struct txbd *txq_consumer; 122 volatile struct txbd *txq_producer; 123 volatile struct txbd *txq_first; 124 volatile struct txbd *txq_last; 125 struct ifqueue txq_mbufs; 126 struct mbuf *txq_next; 127 #ifdef ETSEC_DEBUG 128 struct mbuf *txq_lmbufs[512]; 129 #endif 130 uint32_t txq_qmask; 131 uint32_t txq_free; 132 uint32_t txq_threshold; 133 uint32_t txq_lastintr; 134 bus_size_t txq_reg_tbase; 135 bus_dma_segment_t txq_descmap_seg; 136 }; 137 138 struct pq3etsec_rxqueue { 139 bus_dmamap_t rxq_descmap; 140 volatile struct rxbd *rxq_consumer; 141 volatile struct rxbd *rxq_producer; 142 volatile struct rxbd *rxq_first; 143 volatile struct rxbd *rxq_last; 144 struct mbuf *rxq_mhead; 145 struct mbuf **rxq_mtail; 146 struct mbuf *rxq_mconsumer; 147 #ifdef ETSEC_DEBUG 148 struct mbuf *rxq_mbufs[512]; 149 #endif 150 uint32_t rxq_qmask; 151 uint32_t rxq_inuse; 152 uint32_t rxq_threshold; 153 bus_size_t rxq_reg_rbase; 154 bus_size_t rxq_reg_rbptr; 155 bus_dma_segment_t rxq_descmap_seg; 156 }; 157 158 struct pq3etsec_mapcache { 159 u_int dmc_nmaps; 160 u_int dmc_maxseg; 161 u_int dmc_maxmaps; 162 u_int dmc_maxmapsize; 163 bus_dmamap_t dmc_maps[0]; 164 }; 165 166 struct pq3etsec_softc { 167 device_t sc_dev; 
168 struct ethercom sc_ec; 169 #define sc_if sc_ec.ec_if 170 struct mii_data sc_mii; 171 bus_space_tag_t sc_bst; 172 bus_space_handle_t sc_bsh; 173 bus_dma_tag_t sc_dmat; 174 int sc_phy_addr; 175 prop_dictionary_t sc_intrmap; 176 uint32_t sc_intrmask; 177 178 uint32_t sc_soft_flags; 179 #define SOFT_RESET 0x0001 180 #define SOFT_RXINTR 0x0010 181 #define SOFT_RXBSY 0x0020 182 #define SOFT_TXINTR 0x0100 183 #define SOFT_TXERROR 0x0200 184 185 struct pq3etsec_txqueue sc_txq; 186 struct pq3etsec_rxqueue sc_rxq; 187 uint32_t sc_txerrors; 188 uint32_t sc_rxerrors; 189 190 size_t sc_rx_adjlen; 191 192 /* 193 * Copies of various ETSEC registers. 194 */ 195 uint32_t sc_imask; 196 uint32_t sc_maccfg1; 197 uint32_t sc_maccfg2; 198 uint32_t sc_maxfrm; 199 uint32_t sc_ecntrl; 200 uint32_t sc_dmactrl; 201 uint32_t sc_macstnaddr1; 202 uint32_t sc_macstnaddr2; 203 uint32_t sc_tctrl; 204 uint32_t sc_rctrl; 205 uint32_t sc_gaddr[16]; 206 uint64_t sc_macaddrs[15]; 207 208 void *sc_tx_ih; 209 void *sc_rx_ih; 210 void *sc_error_ih; 211 void *sc_soft_ih; 212 213 kmutex_t *sc_lock; 214 215 struct evcnt sc_ev_tx_stall; 216 struct evcnt sc_ev_tx_intr; 217 struct evcnt sc_ev_rx_stall; 218 struct evcnt sc_ev_rx_intr; 219 struct evcnt sc_ev_error_intr; 220 struct evcnt sc_ev_soft_intr; 221 struct evcnt sc_ev_tx_pause; 222 struct evcnt sc_ev_rx_pause; 223 struct evcnt sc_ev_mii_ticks; 224 225 struct callout sc_mii_callout; 226 uint64_t sc_mii_last_tick; 227 228 struct ifqueue sc_rx_bufcache; 229 struct pq3etsec_mapcache *sc_rx_mapcache; 230 struct pq3etsec_mapcache *sc_tx_mapcache; 231 }; 232 233 static int pq3etsec_match(device_t, cfdata_t, void *); 234 static void pq3etsec_attach(device_t, device_t, void *); 235 236 static void pq3etsec_ifstart(struct ifnet *); 237 static void pq3etsec_ifwatchdog(struct ifnet *); 238 static int pq3etsec_ifinit(struct ifnet *); 239 static void pq3etsec_ifstop(struct ifnet *, int); 240 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 241 242 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 243 struct pq3etsec_mapcache **, size_t, size_t, size_t, size_t); 244 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 245 struct pq3etsec_mapcache *); 246 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 247 struct pq3etsec_mapcache *); 248 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 249 struct pq3etsec_mapcache *, bus_dmamap_t); 250 251 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 252 struct pq3etsec_txqueue *, u_int); 253 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 254 struct pq3etsec_txqueue *); 255 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 256 struct pq3etsec_txqueue *); 257 static bool pq3etsec_txq_consume(struct pq3etsec_softc *, 258 struct pq3etsec_txqueue *); 259 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 260 struct pq3etsec_txqueue *, struct mbuf *m); 261 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 262 struct pq3etsec_txqueue *); 263 264 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 265 struct pq3etsec_rxqueue *, u_int); 266 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 267 struct pq3etsec_rxqueue *); 268 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 269 struct pq3etsec_rxqueue *, bool); 270 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 271 struct pq3etsec_rxqueue *); 272 273 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 274 275 static void pq3etsec_mii_tick(void *); 276 static int 
pq3etsec_rx_intr(void *); 277 static int pq3etsec_tx_intr(void *); 278 static int pq3etsec_error_intr(void *); 279 static void pq3etsec_soft_intr(void *); 280 281 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 282 pq3etsec_match, pq3etsec_attach, NULL, NULL); 283 284 static int 285 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 286 { 287 288 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 289 return 0; 290 291 return 1; 292 } 293 294 static inline uint32_t 295 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 296 { 297 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 298 } 299 300 static inline void 301 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 302 { 303 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 304 } 305 306 static int 307 pq3etsec_mii_readreg(device_t self, int phy, int reg) 308 { 309 struct pq3etsec_softc * const sc = device_private(self); 310 uint32_t miimcom = etsec_read(sc, MIIMCOM); 311 312 // int s = splnet(); 313 314 etsec_write(sc, MIIMADD, 315 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 316 317 etsec_write(sc, IEVENT, IEVENT_MMRD); 318 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */ 319 etsec_write(sc, MIIMCOM, MIIMCOM_READ); 320 #if 0 321 sc->sc_imask |= IEVENT_MMRD; 322 etsec_write(sc, IMASK, sc->sc_imask); 323 #endif 324 325 while (etsec_read(sc, MIIMIND) != 0) { 326 delay(1); 327 } 328 int data = etsec_read(sc, MIIMSTAT); 329 330 if (miimcom == MIIMCOM_SCAN) 331 etsec_write(sc, MIIMCOM, miimcom); 332 333 #if 0 334 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n", 335 __func__, phy, reg, data); 336 #endif 337 etsec_write(sc, IEVENT, IEVENT_MMRD); 338 // splx(s); 339 return data; 340 } 341 342 static void 343 pq3etsec_mii_writereg(device_t self, int phy, int reg, int data) 344 { 345 struct pq3etsec_softc * const sc = device_private(self); 346 uint32_t miimcom = etsec_read(sc, MIIMCOM); 347 348 #if 0 349 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n", 350 __func__, phy, reg, data); 351 #endif 352 353 // int s = splnet(); 354 etsec_write(sc, IEVENT, IEVENT_MMWR); 355 etsec_write(sc, MIIMADD, 356 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 357 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */ 358 etsec_write(sc, MIIMCON, data); 359 360 #if 0 361 sc->sc_imask |= IEVENT_MMWR; 362 etsec_write(sc, IMASK, sc->sc_imask); 363 #endif 364 365 int timo = 1000; /* 1ms */ 366 while ((etsec_read(sc, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 367 delay(1); 368 } 369 370 if (miimcom == MIIMCOM_SCAN) 371 etsec_write(sc, MIIMCOM, miimcom); 372 etsec_write(sc, IEVENT, IEVENT_MMWR); 373 // splx(s); 374 } 375 376 static void 377 pq3etsec_mii_statchg(device_t self) 378 { 379 struct pq3etsec_softc * const sc = device_private(self); 380 struct mii_data * const mii = &sc->sc_mii; 381 382 uint32_t maccfg1 = sc->sc_maccfg1; 383 uint32_t maccfg2 = sc->sc_maccfg2; 384 uint32_t ecntrl = sc->sc_ecntrl; 385 386 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW); 387 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD); 388 389 if (sc->sc_mii.mii_media_active & IFM_FDX) { 390 maccfg2 |= MACCFG2_FD; 391 } 392 393 /* 394 * Now deal with the flow control bits. 395 */ 396 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 397 && (mii->mii_media_active & IFM_ETH_FMASK)) { 398 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 399 maccfg1 |= MACCFG1_RX_FLOW; 400 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 401 maccfg1 |= MACCFG1_TX_FLOW; 402 } 403 404 /* 405 * Now deal with the speed. 
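	 * (1000BASE-T selects GMII interface mode; anything else selects MII
	 * mode, with ECNTRL_R100M set for 100 Mb/s and cleared for 10 Mb/s.)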
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		maccfg2 |= MACCFG2_IFMODE_GMII;
	} else {
		maccfg2 |= MACCFG2_IFMODE_MII;
		ecntrl &= ~ECNTRL_R100M;
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
			ecntrl |= ECNTRL_R100M;
		}
	}

	/*
	 * If things are different, re-init things.
	 */
	if (maccfg1 != sc->sc_maccfg1
	    || maccfg2 != sc->sc_maccfg2
	    || ecntrl != sc->sc_ecntrl) {
		if (sc->sc_if.if_flags & IFF_RUNNING)
			atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
		sc->sc_maccfg1 = maccfg1;
		sc->sc_maccfg2 = maccfg2;
		sc->sc_ecntrl = ecntrl;
	}
}

#if 0
static void
pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

static int
pq3etsec_mediachange(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	int rv = mii_mediachg(&sc->sc_mii);
	return (rv == ENXIO) ? 0 : rv;
}
#endif

static void
pq3etsec_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const psc = device_private(parent);
	struct pq3etsec_softc * const sc = device_private(self);
	struct cpunode_attach_args * const cna = aux;
	struct cpunode_locators * const cnl = &cna->cna_locs;
	cfdata_t cf = device_cfdata(self);
	int error;

	psc->sc_children |= cna->cna_childmask;
	sc->sc_dev = self;
	sc->sc_bst = cna->cna_memt;
	sc->sc_dmat = &booke_bus_dma_tag;

	/*
	 * If we have a common MDIO bus, it all hangs off instance 1.
	 */
	device_t miiself = (cf->cf_flags & 0x100) ? tsec_cd.cd_devs[0] : self;

	/*
	 * See if the phy is in the config file...
	 */
	if (cf->cf_flags & 0x3f) {
		sc->sc_phy_addr = (cf->cf_flags & 0x3f) - 1;
	} else {
		char prop_name[20];
		snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
		    cnl->cnl_instance);
		sc->sc_phy_addr = board_info_get_number(prop_name);
	}
	if (sc->sc_phy_addr != MII_PHY_ANY)
		aprint_normal(" phy %d", sc->sc_phy_addr);

	error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
	    &sc->sc_bsh);
	if (error) {
		aprint_error(": error mapping registers: %d\n", error);
		return;
	}

	/*
	 * Assume firmware has already set the mac address and fetch it
	 * before we reinit it.
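	 * (The MACSTNADDR1/MACSTNADDR2 registers hold the station address in
	 * reversed byte order; see the enaddr initializer below.)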
500 */ 501 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 502 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 503 sc->sc_rctrl = RCTRL_DEFAULT; 504 sc->sc_maccfg2 = MACCFG2_DEFAULT; 505 506 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 507 size_t len; 508 const uint8_t *mac_addr = 509 board_info_get_data("tsec-mac-addr-base", &len); 510 KASSERT(len == ETHER_ADDR_LEN); 511 sc->sc_macstnaddr2 = 512 (mac_addr[1] << 24) 513 | (mac_addr[0] << 16); 514 sc->sc_macstnaddr1 = 515 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 516 | (mac_addr[4] << 16) 517 | (mac_addr[3] << 8) 518 | (mac_addr[2] << 0); 519 #if 0 520 aprint_error(": mac-address unknown\n"); 521 return; 522 #endif 523 } 524 525 char enaddr[ETHER_ADDR_LEN] = { 526 [0] = sc->sc_macstnaddr2 >> 16, 527 [1] = sc->sc_macstnaddr2 >> 24, 528 [2] = sc->sc_macstnaddr1 >> 0, 529 [3] = sc->sc_macstnaddr1 >> 8, 530 [4] = sc->sc_macstnaddr1 >> 16, 531 [5] = sc->sc_macstnaddr1 >> 24, 532 }; 533 534 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 535 if (error) { 536 aprint_error(": failed to init rxq: %d\n", error); 537 return; 538 } 539 540 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 541 if (error) { 542 aprint_error(": failed to init txq: %d\n", error); 543 return; 544 } 545 546 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 547 ETSEC_MAXRXMBUFS, ETSEC_MINRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 548 if (error) { 549 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 550 return; 551 } 552 553 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 554 ETSEC_MAXTXMBUFS, ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 555 if (error) { 556 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 557 return; 558 } 559 560 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 561 pq3etsec_tx_intr, sc); 562 if (sc->sc_tx_ih == NULL) { 563 aprint_error(": failed to establish tx interrupt: %d\n", 564 cnl->cnl_intrs[0]); 565 return; 566 } 567 568 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 569 pq3etsec_rx_intr, sc); 570 if (sc->sc_rx_ih == NULL) { 571 aprint_error(": failed to establish rx interrupt: %d\n", 572 cnl->cnl_intrs[1]); 573 return; 574 } 575 576 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 577 pq3etsec_error_intr, sc); 578 if (sc->sc_error_ih == NULL) { 579 aprint_error(": failed to establish error interrupt: %d\n", 580 cnl->cnl_intrs[2]); 581 return; 582 } 583 584 sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, 585 pq3etsec_soft_intr, sc); 586 if (sc->sc_soft_ih == NULL) { 587 aprint_error(": failed to establish soft interrupt\n"); 588 return; 589 } 590 591 aprint_normal("\n"); 592 593 etsec_write(sc, ATTR, ATTR_DEFAULT); 594 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 595 596 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 597 598 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 599 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 600 601 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 602 ether_sprintf(enaddr)); 603 604 const char * const xname = device_xname(sc->sc_dev); 605 struct ethercom * const ec = &sc->sc_ec; 606 struct ifnet * const ifp = &ec->ec_if; 607 608 ec->ec_mii = &sc->sc_mii; 609 610 sc->sc_mii.mii_ifp = ifp; 611 sc->sc_mii.mii_readreg = pq3etsec_mii_readreg; 612 sc->sc_mii.mii_writereg = pq3etsec_mii_writereg; 613 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg; 614 615 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 616 ether_mediastatus); 617 618 if (sc->sc_phy_addr 
< 32) { 619 mii_attach(miiself, &sc->sc_mii, 0xffffffff, 620 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 621 622 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 623 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 624 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 625 } else { 626 callout_schedule(&sc->sc_mii_callout, hz); 627 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 628 } 629 } else { 630 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); 631 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX); 632 } 633 634 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 635 | ETHERCAP_JUMBO_MTU; 636 637 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 638 ifp->if_softc = sc; 639 ifp->if_capabilities = IFCAP_ETSEC; 640 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 641 ifp->if_ioctl = pq3etsec_ifioctl; 642 ifp->if_start = pq3etsec_ifstart; 643 ifp->if_watchdog = pq3etsec_ifwatchdog; 644 ifp->if_init = pq3etsec_ifinit; 645 ifp->if_stop = pq3etsec_ifstop; 646 IFQ_SET_READY(&ifp->if_snd); 647 648 pq3etsec_ifstop(ifp, true); 649 650 /* 651 * Attach the interface. 652 */ 653 if_attach(ifp); 654 ether_ifattach(ifp, enaddr); 655 656 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC, 657 NULL, xname, "rx stall"); 658 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 659 NULL, xname, "tx stall"); 660 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR, 661 NULL, xname, "tx intr"); 662 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR, 663 NULL, xname, "rx intr"); 664 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR, 665 NULL, xname, "error intr"); 666 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 667 NULL, xname, "soft intr"); 668 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC, 669 NULL, xname, "tx pause"); 670 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC, 671 NULL, xname, "rx pause"); 672 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC, 673 NULL, xname, "mii ticks"); 674 } 675 676 static uint64_t 677 pq3etsec_macaddr_create(const uint8_t *lladdr) 678 { 679 uint64_t macaddr = 0; 680 681 lladdr += ETHER_ADDR_LEN; 682 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) { 683 macaddr = (macaddr << 8) | *--lladdr; 684 } 685 return macaddr << 16; 686 } 687 688 static int 689 pq3etsec_ifinit(struct ifnet *ifp) 690 { 691 struct pq3etsec_softc * const sc = ifp->if_softc; 692 int error = 0; 693 694 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES); 695 if (ifp->if_mtu > ETHERMTU_JUMBO) 696 return error; 697 698 KASSERT(ifp->if_flags & IFF_UP); 699 700 /* 701 * Stop the interface (steps 1 to 4 in the Soft Reset and 702 * Reconfigurating Procedure. 703 */ 704 pq3etsec_ifstop(ifp, 0); 705 706 /* 707 * If our frame size has changed (or it's our first time through) 708 * destroy the existing transmit mapcache. 
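	 * so that it can be recreated below with dmamaps sized to the new
	 * maximum frame length (sc_maxfrm).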
709 */ 710 if (sc->sc_tx_mapcache != NULL 711 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 712 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 713 sc->sc_tx_mapcache = NULL; 714 } 715 716 if (sc->sc_tx_mapcache == NULL) { 717 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 718 ETSEC_MAXTXMBUFS, ETSEC_MAXTXMBUFS, sc->sc_maxfrm, 719 ETSEC_NTXSEGS); 720 if (error) 721 return error; 722 } 723 724 sc->sc_ev_mii_ticks.ev_count++; 725 mii_tick(&sc->sc_mii); 726 727 if (ifp->if_flags & IFF_PROMISC) { 728 sc->sc_rctrl |= RCTRL_PROM; 729 } else { 730 sc->sc_rctrl &= ~RCTRL_PROM; 731 } 732 733 uint32_t rctrl_prsdep = 0; 734 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP); 735 if (VLAN_ATTACHED(&sc->sc_ec)) { 736 sc->sc_rctrl |= RCTRL_VLEX; 737 rctrl_prsdep = RCTRL_PRSDEP_L2; 738 } 739 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 740 sc->sc_rctrl |= RCTRL_IPCSEN; 741 rctrl_prsdep = RCTRL_PRSDEP_L3; 742 } 743 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 744 sc->sc_rctrl |= RCTRL_TUCSEN; 745 rctrl_prsdep = RCTRL_PRSDEP_L4; 746 } 747 sc->sc_rctrl |= rctrl_prsdep; 748 #if 0 749 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP)) 750 aprint_normal_dev(sc->sc_dev, 751 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 752 sc->sc_rctrl, 753 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 754 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 755 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 756 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 757 #endif 758 759 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS); 760 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 761 sc->sc_tctrl |= TCTRL_VLINS; 762 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 763 sc->sc_tctrl |= TCTRL_IPCSEN; 764 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 765 sc->sc_tctrl |= TCTRL_TUCSEN; 766 #if 0 767 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS)) 768 aprint_normal_dev(sc->sc_dev, 769 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 770 sc->sc_tctrl, 771 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 772 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 773 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 774 #endif 775 776 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN); 777 778 const uint64_t macstnaddr = 779 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 780 781 sc->sc_imask = IEVENT_DPE; 782 783 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 784 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 785 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 786 787 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 788 pq3etsec_txq_reset(sc, &sc->sc_txq); 789 790 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 791 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 792 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 793 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 794 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 795 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 796 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 797 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 798 799 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 800 pq3etsec_mc_setup(sc); 801 802 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 803 etsec_write(sc, MRBLR, MCLBYTES); 804 805 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 806 sc->sc_dmactrl |= DMACTRL_DEFAULT; 807 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 808 809 /* 11. 
Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */ 810 etsec_write(sc, TQUEUE, TQUEUE_EN0); 811 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC; 812 813 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 814 815 /* 12. Enable receive queues in RQUEUE, */ 816 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0); 817 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC; 818 819 /* and optionally set TOE functionality in RCTRL. */ 820 etsec_write(sc, RCTRL, sc->sc_rctrl); 821 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 822 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 823 sc->sc_rx_adjlen += sizeof(struct rxfcb); 824 825 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 826 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 827 828 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 829 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 830 831 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 832 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS); 833 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 834 835 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 836 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 837 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 838 839 sc->sc_soft_flags = 0; 840 841 etsec_write(sc, IMASK, sc->sc_imask); 842 843 ifp->if_flags |= IFF_RUNNING; 844 845 return error; 846 } 847 848 static void 849 pq3etsec_ifstop(struct ifnet *ifp, int disable) 850 { 851 struct pq3etsec_softc * const sc = ifp->if_softc; 852 853 KASSERT(!cpu_intr_p()); 854 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC; 855 /* 856 * Clear the GTSC and GRSC from the interrupt mask until 857 * we are ready for them. Then clear them from IEVENT, 858 * request the graceful shutdown, and then enable the 859 * GTSC and GRSC bits in the mask. This should cause the 860 * error interrupt to fire which will issue a wakeup to 861 * allow us to resume. 862 */ 863 864 /* 865 * 1. Set GRS/GTS bits in DMACTRL register 866 */ 867 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS; 868 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 869 etsec_write(sc, IEVENT, imask_gsc_mask); 870 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 871 872 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) { 873 /* 874 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 875 */ 876 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 877 878 u_int timo = 1000; 879 uint32_t ievent = etsec_read(sc, IEVENT); 880 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 881 if (--timo == 0) { 882 aprint_error_dev(sc->sc_dev, 883 "WARNING: " 884 "request to stop failed (IEVENT=%#x)\n", 885 ievent); 886 break; 887 } 888 delay(10); 889 ievent = etsec_read(sc, IEVENT); 890 } 891 } 892 893 /* 894 * Now reset the controller. 895 * 896 * 3. Set SOFT_RESET bit in MACCFG1 register 897 * 4. 
Clear SOFT_RESET bit in MACCFG1 register
	 */
	etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
	etsec_write(sc, MACCFG1, 0);
	etsec_write(sc, IMASK, 0);
	etsec_write(sc, IEVENT, ~0);
	sc->sc_imask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	uint32_t tbipa = etsec_read(sc, TBIPA);
	if (tbipa == sc->sc_phy_addr) {
		aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
		etsec_write(sc, TBIPA, 0x1f);
	}
	uint32_t miimcfg = etsec_read(sc, MIIMCFG);
	etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
	etsec_write(sc, MIIMCFG, miimcfg);

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
	pq3etsec_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		pq3etsec_txq_purge(sc, &sc->sc_txq);
		IF_PURGE(&ifp->if_snd);
	}
}

static void
pq3etsec_ifwatchdog(struct ifnet *ifp)
{
}

static void
pq3etsec_mc_setup(
	struct pq3etsec_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &sc->sc_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
	const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);

	memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
	memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ec, enm);
	for (u_int i = 0; enm != NULL; ) {
		const char *addr = enm->enm_addrlo;
		if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(gaddr, 0xff, 32 << (crc_shift & 1));
			memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
			break;
		}
		if ((sc->sc_rctrl & RCTRL_EMEN)
		    && i < __arraycount(sc->sc_macaddrs)) {
			sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
		} else {
			uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
#if 0
			printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
			    ether_sprintf(addr), crc,
			    crc >> crc_shift,
			    crc >> (crc_shift + 5),
			    (crc >> crc_shift) & 31,
			    1 << (((crc >> crc_shift) & 31) ^ 31));
#endif
			/*
			 * The documentation doesn't completely follow PowerPC
			 * bit order.  The BE crc32 (H) for 01:00:5E:00:00:01
			 * is 0x7fa32d9b.  By empirical testing, the
			 * corresponding hash bit is word 3, bit 31 (ppc bit
			 * order).  Since (3 << 5) | 31 is 0x7f, we deduce
			 * H[0:2] selects the register while H[3:7] selects
			 * the bit (ppc bit order).
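			 * For example, with RCTRL_GHTX clear crc_shift is 24,
			 * so 0x7fa32d9b >> 24 = 0x7f = H[0:7]; 0x7f / 32 = 3
			 * picks gaddr[3] and (0x7f & 31) ^ 31 = 0 yields the
			 * 1 << 0 bit, i.e. ppc bit 31 of that word.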
981 */ 982 crc >>= crc_shift; 983 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 984 } 985 ETHER_NEXT_MULTI(step, enm); 986 } 987 for (u_int i = 0; i < 8; i++) { 988 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 989 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 990 #if 0 991 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 992 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 993 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 994 i, GADDR(i), etsec_read(sc, GADDR(i))); 995 #endif 996 } 997 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 998 uint64_t macaddr = sc->sc_macaddrs[i]; 999 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1000 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1001 #if 0 1002 if (macaddr) 1003 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1004 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1005 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1006 #endif 1007 } 1008 } 1009 1010 static int 1011 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1012 { 1013 struct pq3etsec_softc *sc = ifp->if_softc; 1014 struct ifreq * const ifr = data; 1015 const int s = splnet(); 1016 int error; 1017 1018 switch (cmd) { 1019 case SIOCSIFMEDIA: 1020 case SIOCGIFMEDIA: 1021 /* Flow control requires full-duplex mode. */ 1022 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1023 (ifr->ifr_media & IFM_FDX) == 0) 1024 ifr->ifr_media &= ~IFM_ETH_FMASK; 1025 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1026 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1027 /* We can do both TXPAUSE and RXPAUSE. */ 1028 ifr->ifr_media |= 1029 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1030 } 1031 } 1032 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1033 break; 1034 1035 default: 1036 error = ether_ioctl(ifp, cmd, data); 1037 if (error != ENETRESET) 1038 break; 1039 1040 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1041 error = 0; 1042 if (ifp->if_flags & IFF_RUNNING) 1043 pq3etsec_mc_setup(sc); 1044 break; 1045 } 1046 error = pq3etsec_ifinit(ifp); 1047 break; 1048 } 1049 1050 splx(s); 1051 return error; 1052 } 1053 1054 static void 1055 pq3etsec_rxq_desc_presync( 1056 struct pq3etsec_softc *sc, 1057 struct pq3etsec_rxqueue *rxq, 1058 volatile struct rxbd *rxbd, 1059 size_t count) 1060 { 1061 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1062 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1063 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1064 } 1065 1066 static void 1067 pq3etsec_rxq_desc_postsync( 1068 struct pq3etsec_softc *sc, 1069 struct pq3etsec_rxqueue *rxq, 1070 volatile struct rxbd *rxbd, 1071 size_t count) 1072 { 1073 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1074 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1075 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1076 } 1077 1078 static void 1079 pq3etsec_txq_desc_presync( 1080 struct pq3etsec_softc *sc, 1081 struct pq3etsec_txqueue *txq, 1082 volatile struct txbd *txbd, 1083 size_t count) 1084 { 1085 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1086 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1087 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1088 } 1089 1090 static void 1091 pq3etsec_txq_desc_postsync( 1092 struct pq3etsec_softc *sc, 1093 struct pq3etsec_txqueue *txq, 1094 volatile struct txbd *txbd, 1095 size_t count) 1096 { 1097 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1098 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1099 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1100 } 1101 1102 static 
bus_dmamap_t 1103 pq3etsec_mapcache_get( 1104 struct pq3etsec_softc *sc, 1105 struct pq3etsec_mapcache *dmc) 1106 { 1107 if (dmc->dmc_nmaps == 0) { 1108 bus_dmamap_t map; 1109 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 1110 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 1111 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &map); 1112 if (error) { 1113 aprint_error_dev(sc->sc_dev, 1114 "failed to allocate a %zuB map: %d\n", 1115 dmc->dmc_maxmapsize, error); 1116 return NULL; 1117 } 1118 return map; 1119 } 1120 1121 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 1122 return dmc->dmc_maps[--dmc->dmc_nmaps]; 1123 } 1124 1125 static void 1126 pq3etsec_mapcache_put( 1127 struct pq3etsec_softc *sc, 1128 struct pq3etsec_mapcache *dmc, 1129 bus_dmamap_t map) 1130 { 1131 KASSERT(map != NULL); 1132 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 1133 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 1134 } 1135 1136 static void 1137 pq3etsec_mapcache_destroy( 1138 struct pq3etsec_softc *sc, 1139 struct pq3etsec_mapcache *dmc) 1140 { 1141 const size_t dmc_size = 1142 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]); 1143 1144 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 1145 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 1146 } 1147 kmem_free(dmc, dmc_size); 1148 } 1149 1150 static int 1151 pq3etsec_mapcache_create( 1152 struct pq3etsec_softc *sc, 1153 struct pq3etsec_mapcache **dmc_p, 1154 size_t maxmaps, 1155 size_t minmaps, 1156 size_t maxmapsize, 1157 size_t maxseg) 1158 { 1159 const size_t dmc_size = 1160 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]); 1161 struct pq3etsec_mapcache * const dmc = kmem_zalloc(dmc_size, KM_SLEEP); 1162 1163 dmc->dmc_maxmaps = maxmaps; 1164 dmc->dmc_nmaps = minmaps; 1165 dmc->dmc_maxmapsize = maxmapsize; 1166 dmc->dmc_maxseg = maxseg; 1167 1168 for (u_int i = 0; i < minmaps; i++) { 1169 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 1170 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 1171 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 1172 if (error) { 1173 aprint_error_dev(sc->sc_dev, 1174 "failed to creat dma map cache " 1175 "entry %u of %zu (max %zu): %d\n", 1176 i, minmaps, maxmaps, error); 1177 while (i-- > 0) { 1178 bus_dmamap_destroy(sc->sc_dmat, 1179 dmc->dmc_maps[i]); 1180 } 1181 kmem_free(dmc, dmc_size); 1182 return error; 1183 } 1184 KASSERT(dmc->dmc_maps[i] != NULL); 1185 } 1186 1187 *dmc_p = dmc; 1188 1189 return 0; 1190 } 1191 1192 #if 0 1193 static void 1194 pq3etsec_dmamem_free( 1195 bus_dma_tag_t dmat, 1196 size_t map_size, 1197 bus_dma_segment_t *seg, 1198 bus_dmamap_t map, 1199 void *kvap) 1200 { 1201 bus_dmamap_destroy(dmat, map); 1202 bus_dmamem_unmap(dmat, kvap, map_size); 1203 bus_dmamem_free(dmat, seg, 1); 1204 } 1205 #endif 1206 1207 static int 1208 pq3etsec_dmamem_alloc( 1209 bus_dma_tag_t dmat, 1210 size_t map_size, 1211 bus_dma_segment_t *seg, 1212 bus_dmamap_t *map, 1213 void **kvap) 1214 { 1215 int error; 1216 int nseg; 1217 1218 *kvap = NULL; 1219 *map = NULL; 1220 1221 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0, 1222 seg, 1, &nseg, 0); 1223 if (error) 1224 return error; 1225 1226 KASSERT(nseg == 1); 1227 1228 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 1229 BUS_DMA_COHERENT); 1230 if (error == 0) { 1231 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 1232 map); 1233 if (error == 0) { 1234 error = bus_dmamap_load(dmat, *map, *kvap, map_size, 1235 NULL, 0); 1236 if (error == 0) 1237 return 0; 1238 bus_dmamap_destroy(dmat, *map); 1239 *map = NULL; 1240 } 1241 
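		/* Unwind the partial setup before returning the error. */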
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

static void
pq3etsec_rx_map_unload(
	struct pq3etsec_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
pq3etsec_rxq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	volatile struct rxbd *producer = rxq->rxq_producer;
#if 0
	size_t inuse = rxq->rxq_inuse;
#endif
	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = pq3etsec_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
#endif

		/* rxbd_len is write-only by the ETSEC */
		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
		membar_producer();
		producer->rxbd_flags |= RXBD_E;
		if (__predict_false(rxq->rxq_mhead == NULL)) {
			KASSERT(producer == rxq->rxq_consumer);
			rxq->rxq_mconsumer = m;
		}
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
	}
	if (producer != rxq->rxq_producer) {
		membar_producer();
		pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
		    producer - rxq->rxq_producer);
		rxq->rxq_producer = producer;
	}
	uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
	if (qhlt) {
		KASSERT(qhlt & rxq->rxq_qmask);
		sc->sc_ev_rx_stall.ev_count++;
		etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
	}
#if 0
	aprint_normal_dev(sc->sc_dev,
	    "%s: buffers inuse went from %zu to %zu\n",
	    __func__, inuse, rxq->rxq_inuse);
#endif
	return true;
}

static bool
pq3etsec_rx_offload(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	const struct rxfcb *fcb)
{
	if (fcb->rxfcb_flags & RXFCB_VLN) {
		VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
		    m_freem(m); return false);
	}
	if ((fcb->rxfcb_flags & RXFCB_IP) == 0
	    || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
		return true;
	int csum_flags = 0;
	if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
		csum_flags |= M_CSUM_IPv4;
		if (fcb->rxfcb_flags & RXFCB_EIP)
			csum_flags |= M_CSUM_IPv4_BAD;
	}
	if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
		int ipv_flags;
		if (fcb->rxfcb_flags & RXFCB_IP6)
			ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
		else
			ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
		if (fcb->rxfcb_pro == IPPROTO_TCP) {
			csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
		} else {
			csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
		}
		if (fcb->rxfcb_flags & RXFCB_ETU)
			csum_flags |= M_CSUM_TCP_UDP_BAD;
	}

	m->m_pkthdr.csum_flags = csum_flags;
	return true;
}

static void
pq3etsec_rx_input(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	uint16_t rxbd_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	pq3etsec_rx_map_unload(sc, m);

	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
		struct rxfcb fcb = *mtod(m, struct rxfcb *);
		if (!pq3etsec_rx_offload(sc, m, &fcb))
			return;
	}
	m_adj(m, sc->sc_rx_adjlen);

	if (rxbd_flags & RXBD_M)
		m->m_flags |= M_PROMISC;
	if (rxbd_flags & RXBD_BC)
		m->m_flags |= M_BCAST;
	if (rxbd_flags & RXBD_MC)
		m->m_flags |= M_MCAST;
	m->m_flags |= M_HASFCS;
	m->m_pkthdr.rcvif = &sc->sc_if;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
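	 * (Tap it for any BPF listeners first, then hand it to if_input at
	 * splnet.)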
1444 */ 1445 int s = splnet(); 1446 bpf_mtap(ifp, m); 1447 (*ifp->if_input)(ifp, m); 1448 splx(s); 1449 } 1450 1451 static void 1452 pq3etsec_rxq_consume( 1453 struct pq3etsec_softc *sc, 1454 struct pq3etsec_rxqueue *rxq) 1455 { 1456 struct ifnet * const ifp = &sc->sc_if; 1457 volatile struct rxbd *consumer = rxq->rxq_consumer; 1458 size_t rxconsumed = 0; 1459 1460 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1461 1462 for (;;) { 1463 if (consumer == rxq->rxq_producer) { 1464 rxq->rxq_consumer = consumer; 1465 rxq->rxq_inuse -= rxconsumed; 1466 KASSERT(rxq->rxq_inuse == 0); 1467 return; 1468 } 1469 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1470 const uint16_t rxbd_flags = consumer->rxbd_flags; 1471 if (rxbd_flags & RXBD_E) { 1472 rxq->rxq_consumer = consumer; 1473 rxq->rxq_inuse -= rxconsumed; 1474 return; 1475 } 1476 KASSERT(rxq->rxq_mconsumer != NULL); 1477 #ifdef ETSEC_DEBUG 1478 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1479 #endif 1480 #if 0 1481 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1482 __func__, 1483 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1484 mtod(rxq->rxq_mconsumer, int *)[0], 1485 mtod(rxq->rxq_mconsumer, int *)[1], 1486 mtod(rxq->rxq_mconsumer, int *)[2], 1487 mtod(rxq->rxq_mconsumer, int *)[3]); 1488 #endif 1489 /* 1490 * We own this packet again. Clear all flags except wrap. 1491 */ 1492 rxconsumed++; 1493 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I); 1494 1495 /* 1496 * If this descriptor has the LAST bit set and no errors, 1497 * it's a valid input packet. 1498 */ 1499 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) { 1500 size_t rxbd_len = consumer->rxbd_len; 1501 struct mbuf *m = rxq->rxq_mhead; 1502 struct mbuf *m_last = rxq->rxq_mconsumer; 1503 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1504 rxq->rxq_mtail = &rxq->rxq_mhead; 1505 rxq->rxq_mconsumer = rxq->rxq_mhead; 1506 m_last->m_next = NULL; 1507 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1508 m->m_pkthdr.len = rxbd_len; 1509 pq3etsec_rx_input(sc, m, rxbd_flags); 1510 } else if (rxbd_flags & RXBD_L) { 1511 KASSERT(rxbd_flags & RXBD_ERRORS); 1512 struct mbuf *m; 1513 /* 1514 * We encountered an error, take the mbufs and add 1515 * then to the rx bufcache so we can reuse them. 1516 */ 1517 ifp->if_ierrors++; 1518 for (m = rxq->rxq_mhead; 1519 m != rxq->rxq_mconsumer; 1520 m = m->m_next) { 1521 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1522 } 1523 m = rxq->rxq_mconsumer; 1524 if ((rxq->rxq_mhead = m->m_next) == NULL) 1525 rxq->rxq_mtail = &rxq->rxq_mhead; 1526 rxq->rxq_mconsumer = m->m_next; 1527 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1528 } else { 1529 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1530 } 1531 #ifdef ETSEC_DEBUG 1532 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1533 #endif 1534 1535 /* 1536 * Wrap at the last entry! 
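		 * (RXBD_W marks the final descriptor in the ring, so
		 * consumption resumes at rxq_first.)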
1537 */ 1538 if (rxbd_flags & RXBD_W) { 1539 KASSERT(consumer + 1 == rxq->rxq_last); 1540 consumer = rxq->rxq_first; 1541 } else { 1542 consumer++; 1543 } 1544 #ifdef ETSEC_DEBUG 1545 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1546 #endif 1547 } 1548 } 1549 1550 static void 1551 pq3etsec_rxq_purge( 1552 struct pq3etsec_softc *sc, 1553 struct pq3etsec_rxqueue *rxq, 1554 bool discard) 1555 { 1556 struct mbuf *m; 1557 1558 if ((m = rxq->rxq_mhead) != NULL) { 1559 #ifdef ETSEC_DEBUG 1560 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1561 #endif 1562 1563 if (discard) { 1564 pq3etsec_rx_map_unload(sc, m); 1565 m_freem(m); 1566 } else { 1567 while (m != NULL) { 1568 struct mbuf *m0 = m->m_next; 1569 m->m_next = NULL; 1570 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1571 m = m0; 1572 } 1573 } 1574 1575 } 1576 1577 rxq->rxq_mconsumer = NULL; 1578 rxq->rxq_mhead = NULL; 1579 rxq->rxq_mtail = &rxq->rxq_mhead; 1580 rxq->rxq_inuse = 0; 1581 } 1582 1583 static void 1584 pq3etsec_rxq_reset( 1585 struct pq3etsec_softc *sc, 1586 struct pq3etsec_rxqueue *rxq) 1587 { 1588 /* 1589 * sync all the descriptors 1590 */ 1591 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1592 rxq->rxq_last - rxq->rxq_first); 1593 1594 /* 1595 * Make sure we own all descriptors in the ring. 1596 */ 1597 volatile struct rxbd *rxbd; 1598 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1599 rxbd->rxbd_flags = RXBD_I; 1600 } 1601 1602 /* 1603 * Last descriptor has the wrap flag. 1604 */ 1605 rxbd->rxbd_flags = RXBD_W|RXBD_I; 1606 1607 /* 1608 * Reset the producer consumer indexes. 1609 */ 1610 rxq->rxq_consumer = rxq->rxq_first; 1611 rxq->rxq_producer = rxq->rxq_first; 1612 rxq->rxq_inuse = 0; 1613 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1614 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1615 1616 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY; 1617 1618 /* 1619 * Restart the transmit at the first descriptor 1620 */ 1621 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1622 } 1623 1624 static int 1625 pq3etsec_rxq_attach( 1626 struct pq3etsec_softc *sc, 1627 struct pq3etsec_rxqueue *rxq, 1628 u_int qno) 1629 { 1630 size_t map_size = PAGE_SIZE; 1631 size_t desc_count = map_size / sizeof(struct rxbd); 1632 int error; 1633 void *descs; 1634 1635 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1636 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1637 if (error) 1638 return error; 1639 1640 memset(descs, 0, map_size); 1641 rxq->rxq_first = descs; 1642 rxq->rxq_last = rxq->rxq_first + desc_count; 1643 rxq->rxq_consumer = descs; 1644 rxq->rxq_producer = descs; 1645 1646 pq3etsec_rxq_purge(sc, rxq, true); 1647 pq3etsec_rxq_reset(sc, rxq); 1648 1649 rxq->rxq_reg_rbase = RBASEn(qno); 1650 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1651 1652 return 0; 1653 } 1654 1655 static bool 1656 pq3etsec_txq_active_p( 1657 struct pq3etsec_softc * const sc, 1658 struct pq3etsec_txqueue *txq) 1659 { 1660 return !IF_IS_EMPTY(&txq->txq_mbufs); 1661 } 1662 1663 static bool 1664 pq3etsec_txq_fillable_p( 1665 struct pq3etsec_softc * const sc, 1666 struct pq3etsec_txqueue *txq) 1667 { 1668 return txq->txq_free >= txq->txq_threshold; 1669 } 1670 1671 static int 1672 pq3etsec_txq_attach( 1673 struct pq3etsec_softc *sc, 1674 struct pq3etsec_txqueue *txq, 1675 u_int qno) 1676 { 1677 size_t map_size = PAGE_SIZE; 1678 size_t desc_count = map_size / sizeof(struct txbd); 1679 int error; 1680 void *descs; 1681 1682 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1683 &txq->txq_descmap_seg, 
&txq->txq_descmap, &descs); 1684 if (error) 1685 return error; 1686 1687 memset(descs, 0, map_size); 1688 txq->txq_first = descs; 1689 txq->txq_last = txq->txq_first + desc_count; 1690 txq->txq_consumer = descs; 1691 txq->txq_producer = descs; 1692 1693 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1694 1695 txq->txq_reg_tbase = TBASEn(qno); 1696 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1697 1698 pq3etsec_txq_reset(sc, txq); 1699 1700 return 0; 1701 } 1702 1703 static int 1704 pq3etsec_txq_map_load( 1705 struct pq3etsec_softc *sc, 1706 struct pq3etsec_txqueue *txq, 1707 struct mbuf *m) 1708 { 1709 bus_dmamap_t map; 1710 int error; 1711 1712 map = M_GETCTX(m, bus_dmamap_t); 1713 if (map != NULL) 1714 return 0; 1715 1716 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1717 if (map == NULL) 1718 return ENOMEM; 1719 1720 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1721 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1722 if (error) 1723 return error; 1724 1725 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1726 BUS_DMASYNC_PREWRITE); 1727 M_SETCTX(m, map); 1728 return 0; 1729 } 1730 1731 static void 1732 pq3etsec_txq_map_unload( 1733 struct pq3etsec_softc *sc, 1734 struct pq3etsec_txqueue *txq, 1735 struct mbuf *m) 1736 { 1737 KASSERT(m); 1738 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1739 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1740 BUS_DMASYNC_POSTWRITE); 1741 bus_dmamap_unload(sc->sc_dmat, map); 1742 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1743 } 1744 1745 static bool 1746 pq3etsec_txq_produce( 1747 struct pq3etsec_softc *sc, 1748 struct pq3etsec_txqueue *txq, 1749 struct mbuf *m) 1750 { 1751 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1752 1753 if (map->dm_nsegs > txq->txq_free) 1754 return false; 1755 1756 /* 1757 * TCP Offload flag must be set in the first descriptor. 1758 */ 1759 volatile struct txbd *producer = txq->txq_producer; 1760 uint16_t last_flags = TXBD_L; 1761 uint16_t first_flags = TXBD_R 1762 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1763 1764 /* 1765 * If we've produced enough descriptors without consuming any 1766 * we need to ask for an interrupt to reclaim some. 1767 */ 1768 txq->txq_lastintr += map->dm_nsegs; 1769 if (txq->txq_lastintr >= txq->txq_threshold 1770 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1771 txq->txq_lastintr = 0; 1772 last_flags |= TXBD_I; 1773 } 1774 1775 #ifdef ETSEC_DEBUG 1776 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1777 #endif 1778 KASSERT(producer != txq->txq_last); 1779 producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1780 producer->txbd_len = map->dm_segs[0].ds_len; 1781 1782 if (map->dm_nsegs > 1) { 1783 volatile struct txbd *start = producer + 1; 1784 size_t count = map->dm_nsegs - 1; 1785 for (u_int i = 1; i < map->dm_nsegs; i++) { 1786 if (__predict_false(++producer == txq->txq_last)) { 1787 producer = txq->txq_first; 1788 if (start < txq->txq_last) { 1789 pq3etsec_txq_desc_presync(sc, txq, 1790 start, txq->txq_last - start); 1791 count -= txq->txq_last - start; 1792 } 1793 start = txq->txq_first; 1794 } 1795 #ifdef ETSEC_DEBUG 1796 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1797 #endif 1798 producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1799 producer->txbd_len = map->dm_segs[i].ds_len; 1800 producer->txbd_flags = TXBD_R 1801 | (producer->txbd_flags & TXBD_W) 1802 | (i == map->dm_nsegs - 1 ? 
last_flags : 0); 1803 #if 0 1804 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1805 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1806 #endif 1807 } 1808 pq3etsec_txq_desc_presync(sc, txq, start, count); 1809 } else { 1810 first_flags |= last_flags; 1811 } 1812 1813 membar_producer(); 1814 txq->txq_producer->txbd_flags = 1815 first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1816 #if 0 1817 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1818 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1819 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1820 #endif 1821 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1822 1823 /* 1824 * Reduce free count by the number of segments we consumed. 1825 */ 1826 txq->txq_free -= map->dm_nsegs; 1827 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1828 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0); 1829 KASSERT(producer->txbd_flags & TXBD_L); 1830 #ifdef ETSEC_DEBUG 1831 txq->txq_lmbufs[producer - txq->txq_first] = m; 1832 #endif 1833 1834 #if 0 1835 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n", 1836 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1837 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1838 #endif 1839 1840 if (++producer == txq->txq_last) 1841 txq->txq_producer = txq->txq_first; 1842 else 1843 txq->txq_producer = producer; 1844 IF_ENQUEUE(&txq->txq_mbufs, m); 1845 1846 /* 1847 * Restart the transmitter. 1848 */ 1849 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */ 1850 1851 return true; 1852 } 1853 1854 static void 1855 pq3etsec_tx_offload( 1856 struct pq3etsec_softc *sc, 1857 struct pq3etsec_txqueue *txq, 1858 struct mbuf **mp) 1859 { 1860 struct mbuf *m = *mp; 1861 u_int csum_flags = m->m_pkthdr.csum_flags; 1862 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m); 1863 1864 KASSERT(m->m_flags & M_PKTHDR); 1865 1866 /* 1867 * Let see if we are doing any offload first. 1868 */ 1869 if (csum_flags == 0 && vtag == 0) { 1870 m->m_flags &= ~M_HASFCB; 1871 return; 1872 } 1873 1874 uint16_t flags = 0; 1875 if (csum_flags & M_CSUM_IP) { 1876 flags |= TXFCB_IP 1877 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0) 1878 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0) 1879 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0) 1880 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0) 1881 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0); 1882 } 1883 if (vtag) { 1884 flags |= TXFCB_VLN; 1885 } 1886 if (flags == 0) { 1887 m->m_flags &= ~M_HASFCB; 1888 return; 1889 } 1890 1891 struct txfcb fcb; 1892 fcb.txfcb_flags = flags; 1893 if (csum_flags & M_CSUM_IPv4) 1894 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 1895 else 1896 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data); 1897 fcb.txfcb_l3os = ETHER_HDR_LEN; 1898 fcb.txfcb_phcs = 0; 1899 fcb.txfcb_vlctl = vtag ? 
VLAN_TAG_VALUE(vtag) & 0xffff : 0; 1900 1901 #if 0 1902 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n", 1903 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 1904 fcb.txfcb_phcs, fcb.txfcb_vlctl); 1905 #endif 1906 1907 if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 1908 m->m_data -= sizeof(fcb); 1909 m->m_len += sizeof(fcb); 1910 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 1911 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 1912 m->m_data = m->m_pktdat; 1913 m->m_len += sizeof(fcb); 1914 } else { 1915 struct mbuf *mn; 1916 MGET(mn, M_DONTWAIT, m->m_type); 1917 if (mn == NULL) { 1918 if (csum_flags & M_CSUM_IP4) { 1919 #ifdef INET 1920 ip_undefer_csum(m, ETHER_HDR_LEN, 1921 csum_flags & M_CSUM_IP4); 1922 #else 1923 panic("%s: impossible M_CSUM flags %#x", 1924 device_xname(sc->sc_dev), csum_flags); 1925 #endif 1926 } else if (csum_flags & M_CSUM_IP6) { 1927 #ifdef INET6 1928 ip6_undefer_csum(m, ETHER_HDR_LEN, 1929 csum_flags & M_CSUM_IP6); 1930 #else 1931 panic("%s: impossible M_CSUM flags %#x", 1932 device_xname(sc->sc_dev), csum_flags); 1933 #endif 1934 } else if (vtag) { 1935 } 1936 1937 m->m_flags &= ~M_HASFCB; 1938 return; 1939 } 1940 1941 M_MOVE_PKTHDR(mn, m); 1942 mn->m_next = m; 1943 m = mn; 1944 MH_ALIGN(m, sizeof(fcb)); 1945 m->m_len = sizeof(fcb); 1946 *mp = m; 1947 } 1948 m->m_pkthdr.len += sizeof(fcb); 1949 m->m_flags |= M_HASFCB; 1950 *mtod(m, struct txfcb *) = fcb; 1951 return; 1952 } 1953 1954 static bool 1955 pq3etsec_txq_enqueue( 1956 struct pq3etsec_softc *sc, 1957 struct pq3etsec_txqueue *txq) 1958 { 1959 for (;;) { 1960 if (IF_QFULL(&txq->txq_mbufs)) 1961 return false; 1962 struct mbuf *m = txq->txq_next; 1963 if (m == NULL) { 1964 int s = splnet(); 1965 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1966 splx(s); 1967 if (m == NULL) 1968 return true; 1969 M_SETCTX(m, NULL); 1970 pq3etsec_tx_offload(sc, txq, &m); 1971 } else { 1972 txq->txq_next = NULL; 1973 } 1974 int error = pq3etsec_txq_map_load(sc, txq, m); 1975 if (error) { 1976 aprint_error_dev(sc->sc_dev, 1977 "discarded packet due to " 1978 "dmamap load failure: %d\n", error); 1979 m_freem(m); 1980 continue; 1981 } 1982 KASSERT(txq->txq_next == NULL); 1983 if (!pq3etsec_txq_produce(sc, txq, m)) { 1984 txq->txq_next = m; 1985 return false; 1986 } 1987 KASSERT(txq->txq_next == NULL); 1988 } 1989 } 1990 1991 static bool 1992 pq3etsec_txq_consume( 1993 struct pq3etsec_softc *sc, 1994 struct pq3etsec_txqueue *txq) 1995 { 1996 struct ifnet * const ifp = &sc->sc_if; 1997 volatile struct txbd *consumer = txq->txq_consumer; 1998 size_t txfree = 0; 1999 2000 #if 0 2001 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 2002 #endif 2003 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 2004 2005 for (;;) { 2006 if (consumer == txq->txq_producer) { 2007 txq->txq_consumer = consumer; 2008 txq->txq_free += txfree; 2009 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2010 #if 0 2011 printf("%s: empty: freed %zu descriptors going form %zu to %zu\n", 2012 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 2013 #endif 2014 KASSERT(txq->txq_lastintr == 0); 2015 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 2016 return true; 2017 } 2018 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2019 const uint16_t txbd_flags = consumer->txbd_flags; 2020 if (txbd_flags & TXBD_R) { 2021 txq->txq_consumer = consumer; 2022 txq->txq_free += txfree; 2023 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2024 #if 0 2025 printf("%s: 
static bool
pq3etsec_txq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct txbd *consumer = txq->txq_consumer;
	size_t txfree = 0;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif
	etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);

	for (;;) {
		if (consumer == txq->txq_producer) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
			    __func__, txfree, txq->txq_free - txfree, txq->txq_free);
#endif
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
			return true;
		}
		pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
		const uint16_t txbd_flags = consumer->txbd_flags;
		if (txbd_flags & TXBD_R) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: freed %zu descriptors\n",
			    __func__, txfree);
#endif
			return pq3etsec_txq_fillable_p(sc, txq);
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		if (txbd_flags & TXBD_L) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
#ifdef ETSEC_DEBUG
			KASSERTMSG(
			    m == txq->txq_lmbufs[consumer-txq->txq_first],
			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
			    __func__, consumer, consumer - txq->txq_first,
			    txbd_flags, m,
			    &txq->txq_lmbufs[consumer-txq->txq_first],
			    txq->txq_lmbufs[consumer-txq->txq_first]);
#endif
			KASSERT(m);
			pq3etsec_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			if (m->m_flags & M_HASFCB)
				m_adj(m, sizeof(struct txfcb));
			ifp->if_opackets++;
			ifp->if_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				ifp->if_omcasts++;
			if (txbd_flags & TXBD_ERRORS)
				ifp->if_oerrors++;
			m_freem(m);
#ifdef ETSEC_DEBUG
			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
#endif
		} else {
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
#endif
		}

		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		txfree++;
		//consumer->txbd_flags = txbd_flags & TXBD_W;

		/*
		 * Wrap at the last entry!
		 */
		if (txbd_flags & TXBD_W) {
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}
}

static void
pq3etsec_txq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
#ifdef ETSEC_DEBUG
	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
#endif
}

static void
pq3etsec_txq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	/*
	 * Sync all the descriptors.
	 */
	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct txbd *txbd;
	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
		txbd->txbd_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txbd->txbd_flags = TXBD_W;

	/*
	 * Reset the producer and consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;

	/*
	 * Restart the transmit at the first descriptor.
	 */
	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
}

static void
pq3etsec_ifstart(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	softint_schedule(sc->sc_soft_ih);
}

static void
pq3etsec_tx_error(
	struct pq3etsec_softc * const sc)
{
	struct pq3etsec_txqueue * const txq = &sc->sc_txq;

	pq3etsec_txq_consume(sc, txq);

	if (pq3etsec_txq_fillable_p(sc, txq))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
	if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
	} else if (sc->sc_txerrors & IEVENT_EBERR) {
	}

	if (pq3etsec_txq_active_p(sc, txq))
		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
	if (!pq3etsec_txq_enqueue(sc, txq)) {
		sc->sc_ev_tx_stall.ev_count++;
		sc->sc_if.if_flags |= IFF_OACTIVE;
	}

	sc->sc_txerrors = 0;
}

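/*
 * Hardware interrupt handlers.  Each one acknowledges its events (IEVENT
 * is write-1-to-clear), masks them in IMASK so they stay quiet until the
 * soft interrupt has dealt with them, records the deferred work in
 * sc_soft_flags, and schedules the soft interrupt.  The error handler
 * additionally wakes up anyone sleeping on a graceful stop or an MII
 * management transaction.  The real processing happens in
 * pq3etsec_soft_intr().
 */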
int
pq3etsec_tx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	sc->sc_ev_tx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_TXF|IEVENT_TXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
	    __func__, ievent, etsec_read(sc, IMASK));
#endif

	if (ievent == 0)
		return 0;

	sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);
	return 1;
}

int
pq3etsec_rx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	sc->sc_ev_rx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_RXF|IEVENT_RXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
	if (ievent == 0)
		return 0;

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
#endif

	sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);
	return 1;
}

int
pq3etsec_error_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	sc->sc_ev_error_intr.ev_count++;

	for (int rv = 0, soft_flags = 0;; rv = 1) {
		uint32_t ievent = etsec_read(sc, IEVENT);
		ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
		if (ievent == 0) {
			if (soft_flags) {
				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
				softint_schedule(sc->sc_soft_ih);
			}
			return rv;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
		    __func__, ievent, etsec_read(sc, IMASK));
#endif

		if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
			sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(sc);
		}
		if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
			sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(&sc->sc_mii);
		}
		if (ievent & IEVENT_BSY) {
			soft_flags |= SOFT_RXBSY;
			sc->sc_imask &= ~IEVENT_BSY;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
		if (ievent & IEVENT_TXE) {
			soft_flags |= SOFT_TXERROR;
			sc->sc_imask &= ~IEVENT_TXE;
			sc->sc_txerrors |= ievent;
		}
		if (ievent & IEVENT_TXC) {
			sc->sc_ev_tx_pause.ev_count++;
		}
		if (ievent & IEVENT_RXC) {
			sc->sc_ev_rx_pause.ev_count++;
		}
		if (ievent & IEVENT_DPE) {
			soft_flags |= SOFT_RESET;
			sc->sc_imask &= ~IEVENT_DPE;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
	}
}

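/*
 * Soft interrupt.  Takes sc_lock and performs whatever the hard interrupt
 * handlers deferred via sc_soft_flags: reinitialize the interface
 * (SOFT_RESET), bump the receive buffer threshold (SOFT_RXBSY), reclaim
 * and refill the transmit ring (SOFT_TXINTR), harvest received packets
 * (SOFT_RXINTR), and recover from transmit errors (SOFT_TXERROR).  If the
 * interface is still running, it replenishes the receive ring and rewrites
 * IMASK to re-enable the interrupt sources it just serviced.
 */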
void
pq3etsec_soft_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	sc->sc_ev_soft_intr.ev_count++;

	if (soft_flags & SOFT_RESET) {
		int s = splnet();
		pq3etsec_ifinit(ifp);
		splx(s);
		soft_flags = 0;
	}

	if (soft_flags & SOFT_RXBSY) {
		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			sc->sc_imask |= IEVENT_BSY;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}

	if ((soft_flags & SOFT_TXINTR)
	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
			sc->sc_ev_tx_stall.ev_count++;
			ifp->if_flags |= IFF_OACTIVE;
		} else {
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		sc->sc_imask |= IEVENT_TXF;
	}

	if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
		/*
		 * Consume received packets off the receive ring.
		 */
		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
		sc->sc_imask |= IEVENT_RXF;
	}

	if (soft_flags & SOFT_TXERROR) {
		pq3etsec_tx_error(sc);
		sc->sc_imask |= IEVENT_TXE;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
		etsec_write(sc, IMASK, sc->sc_imask);
	} else {
		KASSERT((soft_flags & SOFT_RXBSY) == 0);
	}

	mutex_exit(sc->sc_lock);
}

static void
pq3etsec_mii_tick(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	mutex_enter(sc->sc_lock);
	callout_ack(&sc->sc_mii_callout);
	sc->sc_ev_mii_ticks.ev_count++;
#ifdef DEBUG
	uint64_t now = mftb();
	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
		    __func__, now - sc->sc_mii_last_tick);
		callout_stop(&sc->sc_mii_callout);
	}
#endif
	mii_tick(&sc->sc_mii);
	int s = splnet();
	if (sc->sc_soft_flags & SOFT_RESET)
		softint_schedule(sc->sc_soft_ih);
	splx(s);
	callout_schedule(&sc->sc_mii_callout, hz);
#ifdef DEBUG
	sc->sc_mii_last_tick = now;
#endif
	mutex_exit(sc->sc_lock);
}
