1 /* $NetBSD: pq3etsec.c,v 1.16 2012/07/22 23:46:10 matt Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include "opt_inet.h" 38 #include "opt_mpc85xx.h" 39 40 #include <sys/cdefs.h> 41 42 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.16 2012/07/22 23:46:10 matt Exp $"); 43 44 #include <sys/param.h> 45 #include <sys/cpu.h> 46 #include <sys/device.h> 47 #include <sys/mbuf.h> 48 #include <sys/ioctl.h> 49 #include <sys/intr.h> 50 #include <sys/bus.h> 51 #include <sys/kernel.h> 52 #include <sys/kmem.h> 53 #include <sys/proc.h> 54 #include <sys/atomic.h> 55 #include <sys/callout.h> 56 57 #include <net/if.h> 58 #include <net/if_dl.h> 59 #include <net/if_ether.h> 60 #include <net/if_media.h> 61 62 #include <dev/mii/miivar.h> 63 64 #include <net/bpf.h> 65 66 #ifdef INET 67 #include <netinet/in.h> 68 #include <netinet/in_systm.h> 69 #include <netinet/ip.h> 70 #include <netinet/in_offload.h> 71 #endif /* INET */ 72 #ifdef INET6 73 #include <netinet6/in6.h> 74 #include <netinet/ip6.h> 75 #endif 76 #include <netinet6/in6_offload.h> 77 78 79 #include <powerpc/spr.h> 80 #include <powerpc/booke/spr.h> 81 82 #include <powerpc/booke/cpuvar.h> 83 #include <powerpc/booke/e500var.h> 84 #include <powerpc/booke/e500reg.h> 85 #include <powerpc/booke/etsecreg.h> 86 87 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 88 89 #define ETSEC_MAXTXMBUFS 30 90 #define ETSEC_NTXSEGS 30 91 #define ETSEC_MAXRXMBUFS 511 92 #define ETSEC_MINRXMBUFS 32 93 #define ETSEC_NRXSEGS 1 94 95 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 96 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\ 97 |IFCAP_CSUM_UDPv4_Rx\ 98 |IFCAP_CSUM_TCPv6_Rx\ 99 |IFCAP_CSUM_UDPv6_Rx) 100 101 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 102 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\ 103 |IFCAP_CSUM_UDPv4_Tx\ 104 |IFCAP_CSUM_TCPv6_Tx\ 105 |IFCAP_CSUM_UDPv6_Tx) 106 107 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\ 108 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN) 109 110 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU) 111 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6) 112 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 113 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6) 114 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4) 115 #define M_CSUM_CIP (M_CSUM_IPv4) 116 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 117 118 struct pq3etsec_txqueue { 119 bus_dmamap_t txq_descmap; 120 volatile struct txbd *txq_consumer; 121 volatile struct txbd *txq_producer; 122 volatile struct txbd *txq_first; 123 volatile struct txbd *txq_last; 124 struct ifqueue txq_mbufs; 125 struct mbuf *txq_next; 126 #ifdef ETSEC_DEBUG 127 struct mbuf *txq_lmbufs[512]; 128 #endif 129 uint32_t txq_qmask; 130 uint32_t txq_free; 131 uint32_t txq_threshold; 132 uint32_t txq_lastintr; 133 bus_size_t txq_reg_tbase; 134 bus_dma_segment_t txq_descmap_seg; 135 }; 136 137 struct pq3etsec_rxqueue { 138 bus_dmamap_t rxq_descmap; 139 volatile struct rxbd *rxq_consumer; 140 volatile struct rxbd *rxq_producer; 141 volatile struct rxbd *rxq_first; 142 volatile struct rxbd *rxq_last; 143 struct mbuf *rxq_mhead; 144 struct mbuf **rxq_mtail; 145 struct mbuf *rxq_mconsumer; 146 #ifdef ETSEC_DEBUG 147 struct mbuf *rxq_mbufs[512]; 148 #endif 149 uint32_t rxq_qmask; 150 uint32_t rxq_inuse; 151 uint32_t rxq_threshold; 152 bus_size_t rxq_reg_rbase; 153 bus_size_t rxq_reg_rbptr; 154 bus_dma_segment_t rxq_descmap_seg; 155 }; 156 157 struct pq3etsec_mapcache { 158 u_int dmc_nmaps; 159 u_int dmc_maxseg; 160 u_int dmc_maxmaps; 161 u_int dmc_maxmapsize; 162 bus_dmamap_t dmc_maps[0]; 163 }; 164 165 struct pq3etsec_softc { 166 device_t sc_dev; 167 device_t sc_mdio_dev; 168 struct ethercom sc_ec; 169 #define sc_if sc_ec.ec_if 170 struct mii_data sc_mii; 171 bus_space_tag_t sc_bst; 172 bus_space_handle_t sc_bsh; 173 bus_space_handle_t sc_mdio_bsh; 174 bus_dma_tag_t sc_dmat; 175 int sc_phy_addr; 176 prop_dictionary_t sc_intrmap; 177 uint32_t sc_intrmask; 178 179 uint32_t sc_soft_flags; 180 #define SOFT_RESET 0x0001 181 #define SOFT_RXINTR 0x0010 182 #define SOFT_RXBSY 0x0020 183 #define SOFT_TXINTR 0x0100 184 #define SOFT_TXERROR 0x0200 185 186 struct pq3etsec_txqueue sc_txq; 187 struct pq3etsec_rxqueue sc_rxq; 188 uint32_t sc_txerrors; 189 uint32_t sc_rxerrors; 190 191 size_t sc_rx_adjlen; 192 193 /* 194 * Copies of various ETSEC registers. 195 */ 196 uint32_t sc_imask; 197 uint32_t sc_maccfg1; 198 uint32_t sc_maccfg2; 199 uint32_t sc_maxfrm; 200 uint32_t sc_ecntrl; 201 uint32_t sc_dmactrl; 202 uint32_t sc_macstnaddr1; 203 uint32_t sc_macstnaddr2; 204 uint32_t sc_tctrl; 205 uint32_t sc_rctrl; 206 uint32_t sc_gaddr[16]; 207 uint64_t sc_macaddrs[15]; 208 209 void *sc_tx_ih; 210 void *sc_rx_ih; 211 void *sc_error_ih; 212 void *sc_soft_ih; 213 214 kmutex_t *sc_lock; 215 216 struct evcnt sc_ev_tx_stall; 217 struct evcnt sc_ev_tx_intr; 218 struct evcnt sc_ev_rx_stall; 219 struct evcnt sc_ev_rx_intr; 220 struct evcnt sc_ev_error_intr; 221 struct evcnt sc_ev_soft_intr; 222 struct evcnt sc_ev_tx_pause; 223 struct evcnt sc_ev_rx_pause; 224 struct evcnt sc_ev_mii_ticks; 225 226 struct callout sc_mii_callout; 227 uint64_t sc_mii_last_tick; 228 229 struct ifqueue sc_rx_bufcache; 230 struct pq3etsec_mapcache *sc_rx_mapcache; 231 struct pq3etsec_mapcache *sc_tx_mapcache; 232 }; 233 234 struct pq3mdio_softc { 235 device_t mdio_dev; 236 237 kmutex_t *mdio_lock; 238 239 bus_space_tag_t mdio_bst; 240 bus_space_handle_t mdio_bsh; 241 }; 242 243 static int pq3etsec_match(device_t, cfdata_t, void *); 244 static void pq3etsec_attach(device_t, device_t, void *); 245 246 static int pq3mdio_match(device_t, cfdata_t, void *); 247 static void pq3mdio_attach(device_t, device_t, void *); 248 249 static void pq3etsec_ifstart(struct ifnet *); 250 static void pq3etsec_ifwatchdog(struct ifnet *); 251 static int pq3etsec_ifinit(struct ifnet *); 252 static void pq3etsec_ifstop(struct ifnet *, int); 253 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 254 255 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 256 struct pq3etsec_mapcache **, size_t, size_t, size_t); 257 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 258 struct pq3etsec_mapcache *); 259 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 260 struct pq3etsec_mapcache *); 261 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 262 struct pq3etsec_mapcache *, bus_dmamap_t); 263 264 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 265 struct pq3etsec_txqueue *, u_int); 266 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 267 struct pq3etsec_txqueue *); 268 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 269 struct pq3etsec_txqueue *); 270 static bool pq3etsec_txq_consume(struct pq3etsec_softc *, 271 struct pq3etsec_txqueue *); 272 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 273 struct pq3etsec_txqueue *, struct mbuf *m); 274 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 275 struct pq3etsec_txqueue *); 276 277 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 278 struct pq3etsec_rxqueue *, u_int); 279 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 280 struct pq3etsec_rxqueue *); 281 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 282 struct pq3etsec_rxqueue *, bool); 283 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 284 struct pq3etsec_rxqueue *); 285 286 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 287 288 static void pq3etsec_mii_tick(void *); 289 static int pq3etsec_rx_intr(void *); 290 static int pq3etsec_tx_intr(void *); 291 static int pq3etsec_error_intr(void *); 292 static void pq3etsec_soft_intr(void *); 293 294 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 295 pq3etsec_match, pq3etsec_attach, NULL, NULL); 296 297 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc), 298 pq3mdio_match, pq3mdio_attach, NULL, NULL); 299 300 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc), 301 pq3mdio_match, pq3mdio_attach, NULL, NULL); 302 303 static inline uint32_t 304 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off) 305 { 306 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off); 307 } 308 309 static inline void 310 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data) 311 { 312 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data); 313 } 314 315 static inline uint32_t 316 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 317 { 318 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 319 } 320 321 static int 322 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 323 { 324 return strcmp(cf->cf_name, "mdio") == 0; 325 } 326 327 static int 328 pq3mdio_match(device_t parent, cfdata_t cf, void *aux) 329 { 330 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 331 const bool p1025_p = (svr == (SVR_P1025v1 >> 16) 332 || svr == (SVR_P1016v1 >> 16)); 333 334 if (device_is_a(parent, "cpunode")) { 335 if (!p1025_p 336 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 337 return 0; 338 339 return 1; 340 } 341 342 if (device_is_a(parent, "tsec")) { 343 if (p1025_p 344 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 345 return 0; 346 347 return 1; 348 } 349 350 return 0; 351 } 352 353 static void 354 pq3mdio_attach(device_t parent, device_t self, void *aux) 355 { 356 struct pq3mdio_softc * const mdio = device_private(self); 357 struct cpunode_attach_args * const cna = aux; 358 struct cpunode_locators * const cnl = &cna->cna_locs; 359 360 mdio->mdio_dev = self; 361 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 362 363 if (device_is_a(parent, "cpunode")) { 364 struct cpunode_softc * const psc = device_private(parent); 365 psc->sc_children |= cna->cna_childmask; 366 367 mdio->mdio_bst = cna->cna_memt; 368 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr, 369 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) { 370 aprint_error(": error mapping registers @ %#x\n", 371 cnl->cnl_addr); 372 return; 373 } 374 } else { 375 struct pq3etsec_softc * const sc = device_private(parent); 376 377 KASSERT(device_is_a(parent, "tsec")); 378 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE 379 || cnl->cnl_addr == ETSEC2_BASE 380 || cnl->cnl_addr == ETSEC3_BASE 381 || cnl->cnl_addr == ETSEC4_BASE, 382 "unknown tsec addr %x", cnl->cnl_addr); 383 384 mdio->mdio_bst = sc->sc_bst; 385 mdio->mdio_bsh = sc->sc_bsh; 386 } 387 388 aprint_normal("\n"); 389 } 390 391 static int 392 pq3mdio_mii_readreg(device_t self, int phy, int reg) 393 { 394 struct pq3mdio_softc * const mdio = device_private(self); 395 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 396 397 mutex_enter(mdio->mdio_lock); 398 399 etsec_mdio_write(mdio, MIIMADD, 400 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 401 402 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 403 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ); 404 405 while (etsec_mdio_read(mdio, MIIMIND) != 0) { 406 delay(1); 407 } 408 int data = etsec_mdio_read(mdio, MIIMSTAT); 409 410 if (miimcom == MIIMCOM_SCAN) 411 etsec_mdio_write(mdio, MIIMCOM, miimcom); 412 413 #if 0 414 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 415 __func__, phy, reg, data); 416 #endif 417 mutex_exit(mdio->mdio_lock); 418 return data; 419 } 420 421 static void 422 pq3mdio_mii_writereg(device_t self, int phy, int reg, int data) 423 { 424 struct pq3mdio_softc * const mdio = device_private(self); 425 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 426 427 #if 0 428 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 429 __func__, phy, reg, data); 430 #endif 431 432 mutex_enter(mdio->mdio_lock); 433 434 etsec_mdio_write(mdio, MIIMADD, 435 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 436 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 437 etsec_mdio_write(mdio, MIIMCON, data); 438 439 int timo = 1000; /* 1ms */ 440 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 441 delay(1); 442 } 443 444 if (miimcom == MIIMCOM_SCAN) 445 etsec_mdio_write(mdio, MIIMCOM, miimcom); 446 447 mutex_exit(mdio->mdio_lock); 448 } 449 450 static inline void 451 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 452 { 453 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 454 } 455 456 static void 457 pq3etsec_mii_statchg(struct ifnet *ifp) 458 { 459 struct pq3etsec_softc * const sc = ifp->if_softc; 460 struct mii_data * const mii = &sc->sc_mii; 461 462 uint32_t maccfg1 = sc->sc_maccfg1; 463 uint32_t maccfg2 = sc->sc_maccfg2; 464 uint32_t ecntrl = sc->sc_ecntrl; 465 466 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW); 467 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD); 468 469 if (sc->sc_mii.mii_media_active & IFM_FDX) { 470 maccfg2 |= MACCFG2_FD; 471 } 472 473 /* 474 * Now deal with the flow control bits. 475 */ 476 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 477 && (mii->mii_media_active & IFM_ETH_FMASK)) { 478 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 479 maccfg1 |= MACCFG1_RX_FLOW; 480 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 481 maccfg1 |= MACCFG1_TX_FLOW; 482 } 483 484 /* 485 * Now deal with the speed. 486 */ 487 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 488 maccfg2 |= MACCFG2_IFMODE_GMII; 489 } else { 490 maccfg2 |= MACCFG2_IFMODE_MII; 491 ecntrl &= ~ECNTRL_R100M; 492 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 493 ecntrl |= ECNTRL_R100M; 494 } 495 } 496 497 /* 498 * If things are different, re-init things. 499 */ 500 if (maccfg1 != sc->sc_maccfg1 501 || maccfg2 != sc->sc_maccfg2 502 || ecntrl != sc->sc_ecntrl) { 503 if (sc->sc_if.if_flags & IFF_RUNNING) 504 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 505 sc->sc_maccfg1 = maccfg1; 506 sc->sc_maccfg2 = maccfg2; 507 sc->sc_ecntrl = ecntrl; 508 } 509 } 510 511 #if 0 512 static void 513 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 514 { 515 struct pq3etsec_softc * const sc = ifp->if_softc; 516 517 mii_pollstat(&sc->sc_mii); 518 ether_mediastatus(ifp, ifmr); 519 ifmr->ifm_status = sc->sc_mii.mii_media_status; 520 ifmr->ifm_active = sc->sc_mii.mii_media_active; 521 } 522 523 static int 524 pq3etsec_mediachange(struct ifnet *ifp) 525 { 526 struct pq3etsec_softc * const sc = ifp->if_softc; 527 528 if ((ifp->if_flags & IFF_UP) == 0) 529 return 0; 530 531 int rv = mii_mediachg(&sc->sc_mii); 532 return (rv == ENXIO) ? 0 : rv; 533 } 534 #endif 535 536 static int 537 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 538 { 539 540 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 541 return 0; 542 543 return 1; 544 } 545 546 static void 547 pq3etsec_attach(device_t parent, device_t self, void *aux) 548 { 549 struct cpunode_softc * const psc = device_private(parent); 550 struct pq3etsec_softc * const sc = device_private(self); 551 struct cpunode_attach_args * const cna = aux; 552 struct cpunode_locators * const cnl = &cna->cna_locs; 553 cfdata_t cf = device_cfdata(self); 554 int error; 555 556 psc->sc_children |= cna->cna_childmask; 557 sc->sc_dev = self; 558 sc->sc_bst = cna->cna_memt; 559 sc->sc_dmat = &booke_bus_dma_tag; 560 561 /* 562 * Pull out the mdio bus and phy we are supposed to use. 563 */ 564 const int mdio = cf->cf_loc[CPUNODECF_MDIO]; 565 const int phy = cf->cf_loc[CPUNODECF_PHY]; 566 if (mdio != CPUNODECF_MDIO_DEFAULT) 567 aprint_normal(" mdio %d", mdio); 568 569 /* 570 * See if the phy is in the config file... 571 */ 572 if (phy != CPUNODECF_PHY_DEFAULT) { 573 sc->sc_phy_addr = phy; 574 } else { 575 unsigned char prop_name[20]; 576 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 577 cnl->cnl_instance); 578 sc->sc_phy_addr = board_info_get_number(prop_name); 579 } 580 if (sc->sc_phy_addr != MII_PHY_ANY) 581 aprint_normal(" phy %d", sc->sc_phy_addr); 582 583 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 584 &sc->sc_bsh); 585 if (error) { 586 aprint_error(": error mapping registers: %d\n", error); 587 return; 588 } 589 590 /* 591 * Assume firmware has aready set the mac address and fetch it 592 * before we reinit it. 593 */ 594 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 595 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 596 sc->sc_rctrl = RCTRL_DEFAULT; 597 sc->sc_ecntrl = etsec_read(sc, ECNTRL); 598 sc->sc_maccfg1 = etsec_read(sc, MACCFG1); 599 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT; 600 601 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 602 size_t len; 603 const uint8_t *mac_addr = 604 board_info_get_data("tsec-mac-addr-base", &len); 605 KASSERT(len == ETHER_ADDR_LEN); 606 sc->sc_macstnaddr2 = 607 (mac_addr[1] << 24) 608 | (mac_addr[0] << 16); 609 sc->sc_macstnaddr1 = 610 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 611 | (mac_addr[4] << 16) 612 | (mac_addr[3] << 8) 613 | (mac_addr[2] << 0); 614 #if 0 615 aprint_error(": mac-address unknown\n"); 616 return; 617 #endif 618 } 619 620 char enaddr[ETHER_ADDR_LEN] = { 621 [0] = sc->sc_macstnaddr2 >> 16, 622 [1] = sc->sc_macstnaddr2 >> 24, 623 [2] = sc->sc_macstnaddr1 >> 0, 624 [3] = sc->sc_macstnaddr1 >> 8, 625 [4] = sc->sc_macstnaddr1 >> 16, 626 [5] = sc->sc_macstnaddr1 >> 24, 627 }; 628 629 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 630 if (error) { 631 aprint_error(": failed to init rxq: %d\n", error); 632 return; 633 } 634 635 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 636 if (error) { 637 aprint_error(": failed to init txq: %d\n", error); 638 return; 639 } 640 641 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 642 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 643 if (error) { 644 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 645 return; 646 } 647 648 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 649 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 650 if (error) { 651 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 652 return; 653 } 654 655 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 656 pq3etsec_tx_intr, sc); 657 if (sc->sc_tx_ih == NULL) { 658 aprint_error(": failed to establish tx interrupt: %d\n", 659 cnl->cnl_intrs[0]); 660 return; 661 } 662 663 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 664 pq3etsec_rx_intr, sc); 665 if (sc->sc_rx_ih == NULL) { 666 aprint_error(": failed to establish rx interrupt: %d\n", 667 cnl->cnl_intrs[1]); 668 return; 669 } 670 671 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 672 pq3etsec_error_intr, sc); 673 if (sc->sc_error_ih == NULL) { 674 aprint_error(": failed to establish error interrupt: %d\n", 675 cnl->cnl_intrs[2]); 676 return; 677 } 678 679 sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, 680 pq3etsec_soft_intr, sc); 681 if (sc->sc_soft_ih == NULL) { 682 aprint_error(": failed to establish soft interrupt\n"); 683 return; 684 } 685 686 /* 687 * If there was no MDIO 688 */ 689 if (mdio == CPUNODECF_MDIO_DEFAULT) { 690 aprint_normal("\n"); 691 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna); 692 if (mdio_cf != NULL) { 693 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL); 694 } 695 } else { 696 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio); 697 if (sc->sc_mdio_dev == NULL) { 698 aprint_error(": failed to locate mdio device\n"); 699 return; 700 } 701 aprint_normal("\n"); 702 } 703 704 etsec_write(sc, ATTR, ATTR_DEFAULT); 705 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 706 707 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 708 709 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 710 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 711 712 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 713 ether_sprintf(enaddr)); 714 715 const char * const xname = device_xname(sc->sc_dev); 716 struct ethercom * const ec = &sc->sc_ec; 717 struct ifnet * const ifp = &ec->ec_if; 718 719 ec->ec_mii = &sc->sc_mii; 720 721 sc->sc_mii.mii_ifp = ifp; 722 sc->sc_mii.mii_readreg = pq3mdio_mii_readreg; 723 sc->sc_mii.mii_writereg = pq3mdio_mii_writereg; 724 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg; 725 726 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 727 ether_mediastatus); 728 729 if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) { 730 mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff, 731 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 732 733 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 734 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 735 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 736 } else { 737 callout_schedule(&sc->sc_mii_callout, hz); 738 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 739 } 740 } else { 741 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); 742 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX); 743 } 744 745 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 746 | ETHERCAP_JUMBO_MTU; 747 748 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 749 ifp->if_softc = sc; 750 ifp->if_capabilities = IFCAP_ETSEC; 751 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 752 ifp->if_ioctl = pq3etsec_ifioctl; 753 ifp->if_start = pq3etsec_ifstart; 754 ifp->if_watchdog = pq3etsec_ifwatchdog; 755 ifp->if_init = pq3etsec_ifinit; 756 ifp->if_stop = pq3etsec_ifstop; 757 IFQ_SET_READY(&ifp->if_snd); 758 759 pq3etsec_ifstop(ifp, true); 760 761 /* 762 * Attach the interface. 763 */ 764 if_attach(ifp); 765 ether_ifattach(ifp, enaddr); 766 767 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC, 768 NULL, xname, "rx stall"); 769 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 770 NULL, xname, "tx stall"); 771 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR, 772 NULL, xname, "tx intr"); 773 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR, 774 NULL, xname, "rx intr"); 775 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR, 776 NULL, xname, "error intr"); 777 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 778 NULL, xname, "soft intr"); 779 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC, 780 NULL, xname, "tx pause"); 781 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC, 782 NULL, xname, "rx pause"); 783 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC, 784 NULL, xname, "mii ticks"); 785 } 786 787 static uint64_t 788 pq3etsec_macaddr_create(const uint8_t *lladdr) 789 { 790 uint64_t macaddr = 0; 791 792 lladdr += ETHER_ADDR_LEN; 793 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) { 794 macaddr = (macaddr << 8) | *--lladdr; 795 } 796 return macaddr << 16; 797 } 798 799 static int 800 pq3etsec_ifinit(struct ifnet *ifp) 801 { 802 struct pq3etsec_softc * const sc = ifp->if_softc; 803 int error = 0; 804 805 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES); 806 if (ifp->if_mtu > ETHERMTU_JUMBO) 807 return error; 808 809 KASSERT(ifp->if_flags & IFF_UP); 810 811 /* 812 * Stop the interface (steps 1 to 4 in the Soft Reset and 813 * Reconfigurating Procedure. 814 */ 815 pq3etsec_ifstop(ifp, 0); 816 817 /* 818 * If our frame size has changed (or it's our first time through) 819 * destroy the existing transmit mapcache. 820 */ 821 if (sc->sc_tx_mapcache != NULL 822 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 823 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 824 sc->sc_tx_mapcache = NULL; 825 } 826 827 if (sc->sc_tx_mapcache == NULL) { 828 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 829 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 830 if (error) 831 return error; 832 } 833 834 sc->sc_ev_mii_ticks.ev_count++; 835 mii_tick(&sc->sc_mii); 836 837 if (ifp->if_flags & IFF_PROMISC) { 838 sc->sc_rctrl |= RCTRL_PROM; 839 } else { 840 sc->sc_rctrl &= ~RCTRL_PROM; 841 } 842 843 uint32_t rctrl_prsdep = 0; 844 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP); 845 if (VLAN_ATTACHED(&sc->sc_ec)) { 846 sc->sc_rctrl |= RCTRL_VLEX; 847 rctrl_prsdep = RCTRL_PRSDEP_L2; 848 } 849 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 850 sc->sc_rctrl |= RCTRL_IPCSEN; 851 rctrl_prsdep = RCTRL_PRSDEP_L3; 852 } 853 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 854 sc->sc_rctrl |= RCTRL_TUCSEN; 855 rctrl_prsdep = RCTRL_PRSDEP_L4; 856 } 857 sc->sc_rctrl |= rctrl_prsdep; 858 #if 0 859 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP)) 860 aprint_normal_dev(sc->sc_dev, 861 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 862 sc->sc_rctrl, 863 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 864 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 865 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 866 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 867 #endif 868 869 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS); 870 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 871 sc->sc_tctrl |= TCTRL_VLINS; 872 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 873 sc->sc_tctrl |= TCTRL_IPCSEN; 874 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 875 sc->sc_tctrl |= TCTRL_TUCSEN; 876 #if 0 877 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS)) 878 aprint_normal_dev(sc->sc_dev, 879 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 880 sc->sc_tctrl, 881 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 882 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 883 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 884 #endif 885 886 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN); 887 888 const uint64_t macstnaddr = 889 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 890 891 sc->sc_imask = IEVENT_DPE; 892 893 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 894 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 895 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 896 897 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 898 pq3etsec_txq_reset(sc, &sc->sc_txq); 899 900 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 901 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 902 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 903 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 904 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 905 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 906 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 907 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 908 909 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 910 pq3etsec_mc_setup(sc); 911 912 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 913 etsec_write(sc, MRBLR, MCLBYTES); 914 915 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 916 sc->sc_dmactrl |= DMACTRL_DEFAULT; 917 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 918 919 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */ 920 etsec_write(sc, TQUEUE, TQUEUE_EN0); 921 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC; 922 923 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 924 925 /* 12. Enable receive queues in RQUEUE, */ 926 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0); 927 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC; 928 929 /* and optionally set TOE functionality in RCTRL. */ 930 etsec_write(sc, RCTRL, sc->sc_rctrl); 931 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 932 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 933 sc->sc_rx_adjlen += sizeof(struct rxfcb); 934 935 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 936 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 937 938 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 939 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 940 941 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 942 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS); 943 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 944 945 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 946 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 947 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 948 949 sc->sc_soft_flags = 0; 950 951 etsec_write(sc, IMASK, sc->sc_imask); 952 953 ifp->if_flags |= IFF_RUNNING; 954 955 return error; 956 } 957 958 static void 959 pq3etsec_ifstop(struct ifnet *ifp, int disable) 960 { 961 struct pq3etsec_softc * const sc = ifp->if_softc; 962 963 KASSERT(!cpu_intr_p()); 964 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC; 965 /* 966 * Clear the GTSC and GRSC from the interrupt mask until 967 * we are ready for them. Then clear them from IEVENT, 968 * request the graceful shutdown, and then enable the 969 * GTSC and GRSC bits in the mask. This should cause the 970 * error interrupt to fire which will issue a wakeup to 971 * allow us to resume. 972 */ 973 974 /* 975 * 1. Set GRS/GTS bits in DMACTRL register 976 */ 977 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS; 978 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 979 etsec_write(sc, IEVENT, imask_gsc_mask); 980 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 981 982 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) { 983 /* 984 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 985 */ 986 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 987 988 u_int timo = 1000; 989 uint32_t ievent = etsec_read(sc, IEVENT); 990 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 991 if (--timo == 0) { 992 aprint_error_dev(sc->sc_dev, 993 "WARNING: " 994 "request to stop failed (IEVENT=%#x)\n", 995 ievent); 996 break; 997 } 998 delay(10); 999 ievent = etsec_read(sc, IEVENT); 1000 } 1001 } 1002 1003 /* 1004 * Now reset the controller. 1005 * 1006 * 3. Set SOFT_RESET bit in MACCFG1 register 1007 * 4. Clear SOFT_RESET bit in MACCFG1 register 1008 */ 1009 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 1010 etsec_write(sc, MACCFG1, 0); 1011 etsec_write(sc, IMASK, 0); 1012 etsec_write(sc, IEVENT, ~0); 1013 sc->sc_imask = 0; 1014 ifp->if_flags &= ~IFF_RUNNING; 1015 1016 uint32_t tbipa = etsec_read(sc, TBIPA); 1017 if (tbipa == sc->sc_phy_addr) { 1018 aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 1019 etsec_write(sc, TBIPA, 0x1f); 1020 } 1021 uint32_t miimcfg = etsec_read(sc, MIIMCFG); 1022 etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 1023 etsec_write(sc, MIIMCFG, miimcfg); 1024 1025 /* 1026 * Let's consume any remaing transmitted packets. And if we are 1027 * disabling the interface, purge ourselves of any untransmitted 1028 * packets. But don't consume any received packets, just drop them. 1029 * If we aren't disabling the interface, save the mbufs in the 1030 * receive queue for reuse. 1031 */ 1032 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1033 pq3etsec_txq_consume(sc, &sc->sc_txq); 1034 if (disable) { 1035 pq3etsec_txq_purge(sc, &sc->sc_txq); 1036 IF_PURGE(&ifp->if_snd); 1037 } 1038 } 1039 1040 static void 1041 pq3etsec_ifwatchdog(struct ifnet *ifp) 1042 { 1043 } 1044 1045 static void 1046 pq3etsec_mc_setup( 1047 struct pq3etsec_softc *sc) 1048 { 1049 struct ethercom * const ec = &sc->sc_ec; 1050 struct ifnet * const ifp = &sc->sc_if; 1051 struct ether_multi *enm; 1052 struct ether_multistep step; 1053 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1054 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1055 1056 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1057 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1058 1059 ifp->if_flags &= ~IFF_ALLMULTI; 1060 1061 ETHER_FIRST_MULTI(step, ec, enm); 1062 for (u_int i = 0; enm != NULL; ) { 1063 const char *addr = enm->enm_addrlo; 1064 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1065 ifp->if_flags |= IFF_ALLMULTI; 1066 memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1067 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1068 break; 1069 } 1070 if ((sc->sc_rctrl & RCTRL_EMEN) 1071 && i < __arraycount(sc->sc_macaddrs)) { 1072 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1073 } else { 1074 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1075 #if 0 1076 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1077 ether_sprintf(addr), crc, 1078 crc >> crc_shift, 1079 crc >> (crc_shift + 5), 1080 (crc >> crc_shift) & 31, 1081 1 << (((crc >> crc_shift) & 31) ^ 31)); 1082 #endif 1083 /* 1084 * The documentation doesn't completely follow PowerPC 1085 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1086 * is 0x7fa32d9b. By empirical testing, the 1087 * corresponding hash bit is word 3, bit 31 (ppc bit 1088 * order). Since 3 << 31 | 31 is 0x7f, we deduce 1089 * H[0:2] selects the register while H[3:7] selects 1090 * the bit (ppc bit order). 1091 */ 1092 crc >>= crc_shift; 1093 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 1094 } 1095 ETHER_NEXT_MULTI(step, enm); 1096 } 1097 for (u_int i = 0; i < 8; i++) { 1098 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 1099 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 1100 #if 0 1101 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 1102 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 1103 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 1104 i, GADDR(i), etsec_read(sc, GADDR(i))); 1105 #endif 1106 } 1107 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 1108 uint64_t macaddr = sc->sc_macaddrs[i]; 1109 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1110 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1111 #if 0 1112 if (macaddr) 1113 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1114 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1115 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1116 #endif 1117 } 1118 } 1119 1120 static int 1121 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1122 { 1123 struct pq3etsec_softc *sc = ifp->if_softc; 1124 struct ifreq * const ifr = data; 1125 const int s = splnet(); 1126 int error; 1127 1128 switch (cmd) { 1129 case SIOCSIFMEDIA: 1130 case SIOCGIFMEDIA: 1131 /* Flow control requires full-duplex mode. */ 1132 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1133 (ifr->ifr_media & IFM_FDX) == 0) 1134 ifr->ifr_media &= ~IFM_ETH_FMASK; 1135 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1136 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1137 /* We can do both TXPAUSE and RXPAUSE. */ 1138 ifr->ifr_media |= 1139 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1140 } 1141 } 1142 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1143 break; 1144 1145 default: 1146 error = ether_ioctl(ifp, cmd, data); 1147 if (error != ENETRESET) 1148 break; 1149 1150 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1151 error = 0; 1152 if (ifp->if_flags & IFF_RUNNING) 1153 pq3etsec_mc_setup(sc); 1154 break; 1155 } 1156 error = pq3etsec_ifinit(ifp); 1157 break; 1158 } 1159 1160 splx(s); 1161 return error; 1162 } 1163 1164 static void 1165 pq3etsec_rxq_desc_presync( 1166 struct pq3etsec_softc *sc, 1167 struct pq3etsec_rxqueue *rxq, 1168 volatile struct rxbd *rxbd, 1169 size_t count) 1170 { 1171 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1172 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1173 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1174 } 1175 1176 static void 1177 pq3etsec_rxq_desc_postsync( 1178 struct pq3etsec_softc *sc, 1179 struct pq3etsec_rxqueue *rxq, 1180 volatile struct rxbd *rxbd, 1181 size_t count) 1182 { 1183 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1184 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1185 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1186 } 1187 1188 static void 1189 pq3etsec_txq_desc_presync( 1190 struct pq3etsec_softc *sc, 1191 struct pq3etsec_txqueue *txq, 1192 volatile struct txbd *txbd, 1193 size_t count) 1194 { 1195 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1196 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1197 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1198 } 1199 1200 static void 1201 pq3etsec_txq_desc_postsync( 1202 struct pq3etsec_softc *sc, 1203 struct pq3etsec_txqueue *txq, 1204 volatile struct txbd *txbd, 1205 size_t count) 1206 { 1207 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1208 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1209 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1210 } 1211 1212 static bus_dmamap_t 1213 pq3etsec_mapcache_get( 1214 struct pq3etsec_softc *sc, 1215 struct pq3etsec_mapcache *dmc) 1216 { 1217 KASSERT(dmc->dmc_nmaps > 0); 1218 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 1219 return dmc->dmc_maps[--dmc->dmc_nmaps]; 1220 } 1221 1222 static void 1223 pq3etsec_mapcache_put( 1224 struct pq3etsec_softc *sc, 1225 struct pq3etsec_mapcache *dmc, 1226 bus_dmamap_t map) 1227 { 1228 KASSERT(map != NULL); 1229 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 1230 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 1231 } 1232 1233 static void 1234 pq3etsec_mapcache_destroy( 1235 struct pq3etsec_softc *sc, 1236 struct pq3etsec_mapcache *dmc) 1237 { 1238 const size_t dmc_size = 1239 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]); 1240 1241 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 1242 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 1243 } 1244 kmem_intr_free(dmc, dmc_size); 1245 } 1246 1247 static int 1248 pq3etsec_mapcache_create( 1249 struct pq3etsec_softc *sc, 1250 struct pq3etsec_mapcache **dmc_p, 1251 size_t maxmaps, 1252 size_t maxmapsize, 1253 size_t maxseg) 1254 { 1255 const size_t dmc_size = 1256 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]); 1257 struct pq3etsec_mapcache * const dmc = 1258 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 1259 1260 dmc->dmc_maxmaps = maxmaps; 1261 dmc->dmc_nmaps = maxmaps; 1262 dmc->dmc_maxmapsize = maxmapsize; 1263 dmc->dmc_maxseg = maxseg; 1264 1265 for (u_int i = 0; i < maxmaps; i++) { 1266 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 1267 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 1268 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 1269 if (error) { 1270 aprint_error_dev(sc->sc_dev, 1271 "failed to creat dma map cache " 1272 "entry %u of %zu: %d\n", 1273 i, maxmaps, error); 1274 while (i-- > 0) { 1275 bus_dmamap_destroy(sc->sc_dmat, 1276 dmc->dmc_maps[i]); 1277 } 1278 kmem_intr_free(dmc, dmc_size); 1279 return error; 1280 } 1281 KASSERT(dmc->dmc_maps[i] != NULL); 1282 } 1283 1284 *dmc_p = dmc; 1285 1286 return 0; 1287 } 1288 1289 #if 0 1290 static void 1291 pq3etsec_dmamem_free( 1292 bus_dma_tag_t dmat, 1293 size_t map_size, 1294 bus_dma_segment_t *seg, 1295 bus_dmamap_t map, 1296 void *kvap) 1297 { 1298 bus_dmamap_destroy(dmat, map); 1299 bus_dmamem_unmap(dmat, kvap, map_size); 1300 bus_dmamem_free(dmat, seg, 1); 1301 } 1302 #endif 1303 1304 static int 1305 pq3etsec_dmamem_alloc( 1306 bus_dma_tag_t dmat, 1307 size_t map_size, 1308 bus_dma_segment_t *seg, 1309 bus_dmamap_t *map, 1310 void **kvap) 1311 { 1312 int error; 1313 int nseg; 1314 1315 *kvap = NULL; 1316 *map = NULL; 1317 1318 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0, 1319 seg, 1, &nseg, 0); 1320 if (error) 1321 return error; 1322 1323 KASSERT(nseg == 1); 1324 1325 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 1326 BUS_DMA_COHERENT); 1327 if (error == 0) { 1328 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 1329 map); 1330 if (error == 0) { 1331 error = bus_dmamap_load(dmat, *map, *kvap, map_size, 1332 NULL, 0); 1333 if (error == 0) 1334 return 0; 1335 bus_dmamap_destroy(dmat, *map); 1336 *map = NULL; 1337 } 1338 bus_dmamem_unmap(dmat, *kvap, map_size); 1339 *kvap = NULL; 1340 } 1341 bus_dmamem_free(dmat, seg, nseg); 1342 return 0; 1343 } 1344 1345 static struct mbuf * 1346 pq3etsec_rx_buf_alloc( 1347 struct pq3etsec_softc *sc) 1348 { 1349 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA); 1350 if (m == NULL) { 1351 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr"); 1352 return NULL; 1353 } 1354 MCLGET(m, M_DONTWAIT); 1355 if ((m->m_flags & M_EXT) == 0) { 1356 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 1357 m_freem(m); 1358 return NULL; 1359 } 1360 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1361 1362 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache); 1363 if (map == NULL) { 1364 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 1365 m_freem(m); 1366 return NULL; 1367 } 1368 M_SETCTX(m, map); 1369 m->m_len = m->m_pkthdr.len = MCLBYTES; 1370 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1371 BUS_DMA_READ|BUS_DMA_NOWAIT); 1372 if (error) { 1373 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n", 1374 error); 1375 M_SETCTX(m, NULL); 1376 m_freem(m); 1377 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1378 return NULL; 1379 } 1380 KASSERT(map->dm_mapsize == MCLBYTES); 1381 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1382 BUS_DMASYNC_PREREAD); 1383 1384 return m; 1385 } 1386 1387 static void 1388 pq3etsec_rx_map_unload( 1389 struct pq3etsec_softc *sc, 1390 struct mbuf *m) 1391 { 1392 KASSERT(m); 1393 for (; m != NULL; m = m->m_next) { 1394 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1395 KASSERT(map); 1396 KASSERT(map->dm_mapsize == MCLBYTES); 1397 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 1398 BUS_DMASYNC_POSTREAD); 1399 bus_dmamap_unload(sc->sc_dmat, map); 1400 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1401 M_SETCTX(m, NULL); 1402 } 1403 } 1404 1405 static bool 1406 pq3etsec_rxq_produce( 1407 struct pq3etsec_softc *sc, 1408 struct pq3etsec_rxqueue *rxq) 1409 { 1410 volatile struct rxbd *producer = rxq->rxq_producer; 1411 #if 0 1412 size_t inuse = rxq->rxq_inuse; 1413 #endif 1414 while (rxq->rxq_inuse < rxq->rxq_threshold) { 1415 struct mbuf *m; 1416 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 1417 if (m == NULL) { 1418 m = pq3etsec_rx_buf_alloc(sc); 1419 if (m == NULL) { 1420 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__); 1421 break; 1422 } 1423 } 1424 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1425 KASSERT(map); 1426 1427 #ifdef ETSEC_DEBUG 1428 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL); 1429 rxq->rxq_mbufs[producer-rxq->rxq_first] = m; 1430 #endif 1431 1432 /* rxbd_len is write-only by the ETSEC */ 1433 producer->rxbd_bufptr = map->dm_segs[0].ds_addr; 1434 membar_producer(); 1435 producer->rxbd_flags |= RXBD_E; 1436 if (__predict_false(rxq->rxq_mhead == NULL)) { 1437 KASSERT(producer == rxq->rxq_consumer); 1438 rxq->rxq_mconsumer = m; 1439 } 1440 *rxq->rxq_mtail = m; 1441 rxq->rxq_mtail = &m->m_next; 1442 m->m_len = MCLBYTES; 1443 m->m_next = NULL; 1444 rxq->rxq_inuse++; 1445 if (++producer == rxq->rxq_last) { 1446 membar_producer(); 1447 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1448 rxq->rxq_last - rxq->rxq_producer); 1449 producer = rxq->rxq_producer = rxq->rxq_first; 1450 } 1451 } 1452 if (producer != rxq->rxq_producer) { 1453 membar_producer(); 1454 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1455 producer - rxq->rxq_producer); 1456 rxq->rxq_producer = producer; 1457 } 1458 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1459 if (qhlt) { 1460 KASSERT(qhlt & rxq->rxq_qmask); 1461 sc->sc_ev_rx_stall.ev_count++; 1462 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1463 } 1464 #if 0 1465 aprint_normal_dev(sc->sc_dev, 1466 "%s: buffers inuse went from %zu to %zu\n", 1467 __func__, inuse, rxq->rxq_inuse); 1468 #endif 1469 return true; 1470 } 1471 1472 static bool 1473 pq3etsec_rx_offload( 1474 struct pq3etsec_softc *sc, 1475 struct mbuf *m, 1476 const struct rxfcb *fcb) 1477 { 1478 if (fcb->rxfcb_flags & RXFCB_VLN) { 1479 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl, 1480 m_freem(m); return false); 1481 } 1482 if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1483 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0) 1484 return true; 1485 int csum_flags = 0; 1486 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) { 1487 csum_flags |= M_CSUM_IPv4; 1488 if (fcb->rxfcb_flags & RXFCB_EIP) 1489 csum_flags |= M_CSUM_IPv4_BAD; 1490 } 1491 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1492 int ipv_flags; 1493 if (fcb->rxfcb_flags & RXFCB_IP6) 1494 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6; 1495 else 1496 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4; 1497 if (fcb->rxfcb_pro == IPPROTO_TCP) { 1498 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags; 1499 } else { 1500 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags; 1501 } 1502 if (fcb->rxfcb_flags & RXFCB_ETU) 1503 csum_flags |= M_CSUM_TCP_UDP_BAD; 1504 } 1505 1506 m->m_pkthdr.csum_flags = csum_flags; 1507 return true; 1508 } 1509 1510 static void 1511 pq3etsec_rx_input( 1512 struct pq3etsec_softc *sc, 1513 struct mbuf *m, 1514 uint16_t rxbd_flags) 1515 { 1516 struct ifnet * const ifp = &sc->sc_if; 1517 1518 pq3etsec_rx_map_unload(sc, m); 1519 1520 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1521 struct rxfcb fcb = *mtod(m, struct rxfcb *); 1522 if (!pq3etsec_rx_offload(sc, m, &fcb)) 1523 return; 1524 } 1525 m_adj(m, sc->sc_rx_adjlen); 1526 1527 if (rxbd_flags & RXBD_M) 1528 m->m_flags |= M_PROMISC; 1529 if (rxbd_flags & RXBD_BC) 1530 m->m_flags |= M_BCAST; 1531 if (rxbd_flags & RXBD_MC) 1532 m->m_flags |= M_MCAST; 1533 m->m_flags |= M_HASFCS; 1534 m->m_pkthdr.rcvif = &sc->sc_if; 1535 1536 ifp->if_ipackets++; 1537 ifp->if_ibytes += m->m_pkthdr.len; 1538 1539 /* 1540 * Let's give it to the network subsystm to deal with. 1541 */ 1542 int s = splnet(); 1543 bpf_mtap(ifp, m); 1544 (*ifp->if_input)(ifp, m); 1545 splx(s); 1546 } 1547 1548 static void 1549 pq3etsec_rxq_consume( 1550 struct pq3etsec_softc *sc, 1551 struct pq3etsec_rxqueue *rxq) 1552 { 1553 struct ifnet * const ifp = &sc->sc_if; 1554 volatile struct rxbd *consumer = rxq->rxq_consumer; 1555 size_t rxconsumed = 0; 1556 1557 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1558 1559 for (;;) { 1560 if (consumer == rxq->rxq_producer) { 1561 rxq->rxq_consumer = consumer; 1562 rxq->rxq_inuse -= rxconsumed; 1563 KASSERT(rxq->rxq_inuse == 0); 1564 return; 1565 } 1566 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1567 const uint16_t rxbd_flags = consumer->rxbd_flags; 1568 if (rxbd_flags & RXBD_E) { 1569 rxq->rxq_consumer = consumer; 1570 rxq->rxq_inuse -= rxconsumed; 1571 return; 1572 } 1573 KASSERT(rxq->rxq_mconsumer != NULL); 1574 #ifdef ETSEC_DEBUG 1575 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1576 #endif 1577 #if 0 1578 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1579 __func__, 1580 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1581 mtod(rxq->rxq_mconsumer, int *)[0], 1582 mtod(rxq->rxq_mconsumer, int *)[1], 1583 mtod(rxq->rxq_mconsumer, int *)[2], 1584 mtod(rxq->rxq_mconsumer, int *)[3]); 1585 #endif 1586 /* 1587 * We own this packet again. Clear all flags except wrap. 1588 */ 1589 rxconsumed++; 1590 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I); 1591 1592 /* 1593 * If this descriptor has the LAST bit set and no errors, 1594 * it's a valid input packet. 1595 */ 1596 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) { 1597 size_t rxbd_len = consumer->rxbd_len; 1598 struct mbuf *m = rxq->rxq_mhead; 1599 struct mbuf *m_last = rxq->rxq_mconsumer; 1600 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1601 rxq->rxq_mtail = &rxq->rxq_mhead; 1602 rxq->rxq_mconsumer = rxq->rxq_mhead; 1603 m_last->m_next = NULL; 1604 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1605 m->m_pkthdr.len = rxbd_len; 1606 pq3etsec_rx_input(sc, m, rxbd_flags); 1607 } else if (rxbd_flags & RXBD_L) { 1608 KASSERT(rxbd_flags & RXBD_ERRORS); 1609 struct mbuf *m; 1610 /* 1611 * We encountered an error, take the mbufs and add 1612 * then to the rx bufcache so we can reuse them. 1613 */ 1614 ifp->if_ierrors++; 1615 for (m = rxq->rxq_mhead; 1616 m != rxq->rxq_mconsumer; 1617 m = m->m_next) { 1618 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1619 } 1620 m = rxq->rxq_mconsumer; 1621 if ((rxq->rxq_mhead = m->m_next) == NULL) 1622 rxq->rxq_mtail = &rxq->rxq_mhead; 1623 rxq->rxq_mconsumer = m->m_next; 1624 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1625 } else { 1626 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1627 } 1628 #ifdef ETSEC_DEBUG 1629 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1630 #endif 1631 1632 /* 1633 * Wrap at the last entry! 1634 */ 1635 if (rxbd_flags & RXBD_W) { 1636 KASSERT(consumer + 1 == rxq->rxq_last); 1637 consumer = rxq->rxq_first; 1638 } else { 1639 consumer++; 1640 } 1641 #ifdef ETSEC_DEBUG 1642 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1643 #endif 1644 } 1645 } 1646 1647 static void 1648 pq3etsec_rxq_purge( 1649 struct pq3etsec_softc *sc, 1650 struct pq3etsec_rxqueue *rxq, 1651 bool discard) 1652 { 1653 struct mbuf *m; 1654 1655 if ((m = rxq->rxq_mhead) != NULL) { 1656 #ifdef ETSEC_DEBUG 1657 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1658 #endif 1659 1660 if (discard) { 1661 pq3etsec_rx_map_unload(sc, m); 1662 m_freem(m); 1663 } else { 1664 while (m != NULL) { 1665 struct mbuf *m0 = m->m_next; 1666 m->m_next = NULL; 1667 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1668 m = m0; 1669 } 1670 } 1671 1672 } 1673 1674 rxq->rxq_mconsumer = NULL; 1675 rxq->rxq_mhead = NULL; 1676 rxq->rxq_mtail = &rxq->rxq_mhead; 1677 rxq->rxq_inuse = 0; 1678 } 1679 1680 static void 1681 pq3etsec_rxq_reset( 1682 struct pq3etsec_softc *sc, 1683 struct pq3etsec_rxqueue *rxq) 1684 { 1685 /* 1686 * sync all the descriptors 1687 */ 1688 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1689 rxq->rxq_last - rxq->rxq_first); 1690 1691 /* 1692 * Make sure we own all descriptors in the ring. 1693 */ 1694 volatile struct rxbd *rxbd; 1695 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1696 rxbd->rxbd_flags = RXBD_I; 1697 } 1698 1699 /* 1700 * Last descriptor has the wrap flag. 1701 */ 1702 rxbd->rxbd_flags = RXBD_W|RXBD_I; 1703 1704 /* 1705 * Reset the producer consumer indexes. 1706 */ 1707 rxq->rxq_consumer = rxq->rxq_first; 1708 rxq->rxq_producer = rxq->rxq_first; 1709 rxq->rxq_inuse = 0; 1710 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1711 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1712 1713 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY; 1714 1715 /* 1716 * Restart the transmit at the first descriptor 1717 */ 1718 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1719 } 1720 1721 static int 1722 pq3etsec_rxq_attach( 1723 struct pq3etsec_softc *sc, 1724 struct pq3etsec_rxqueue *rxq, 1725 u_int qno) 1726 { 1727 size_t map_size = PAGE_SIZE; 1728 size_t desc_count = map_size / sizeof(struct rxbd); 1729 int error; 1730 void *descs; 1731 1732 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1733 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1734 if (error) 1735 return error; 1736 1737 memset(descs, 0, map_size); 1738 rxq->rxq_first = descs; 1739 rxq->rxq_last = rxq->rxq_first + desc_count; 1740 rxq->rxq_consumer = descs; 1741 rxq->rxq_producer = descs; 1742 1743 pq3etsec_rxq_purge(sc, rxq, true); 1744 pq3etsec_rxq_reset(sc, rxq); 1745 1746 rxq->rxq_reg_rbase = RBASEn(qno); 1747 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1748 1749 return 0; 1750 } 1751 1752 static bool 1753 pq3etsec_txq_active_p( 1754 struct pq3etsec_softc * const sc, 1755 struct pq3etsec_txqueue *txq) 1756 { 1757 return !IF_IS_EMPTY(&txq->txq_mbufs); 1758 } 1759 1760 static bool 1761 pq3etsec_txq_fillable_p( 1762 struct pq3etsec_softc * const sc, 1763 struct pq3etsec_txqueue *txq) 1764 { 1765 return txq->txq_free >= txq->txq_threshold; 1766 } 1767 1768 static int 1769 pq3etsec_txq_attach( 1770 struct pq3etsec_softc *sc, 1771 struct pq3etsec_txqueue *txq, 1772 u_int qno) 1773 { 1774 size_t map_size = PAGE_SIZE; 1775 size_t desc_count = map_size / sizeof(struct txbd); 1776 int error; 1777 void *descs; 1778 1779 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1780 &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1781 if (error) 1782 return error; 1783 1784 memset(descs, 0, map_size); 1785 txq->txq_first = descs; 1786 txq->txq_last = txq->txq_first + desc_count; 1787 txq->txq_consumer = descs; 1788 txq->txq_producer = descs; 1789 1790 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1791 1792 txq->txq_reg_tbase = TBASEn(qno); 1793 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1794 1795 pq3etsec_txq_reset(sc, txq); 1796 1797 return 0; 1798 } 1799 1800 static int 1801 pq3etsec_txq_map_load( 1802 struct pq3etsec_softc *sc, 1803 struct pq3etsec_txqueue *txq, 1804 struct mbuf *m) 1805 { 1806 bus_dmamap_t map; 1807 int error; 1808 1809 map = M_GETCTX(m, bus_dmamap_t); 1810 if (map != NULL) 1811 return 0; 1812 1813 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1814 if (map == NULL) 1815 return ENOMEM; 1816 1817 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1818 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1819 if (error) 1820 return error; 1821 1822 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1823 BUS_DMASYNC_PREWRITE); 1824 M_SETCTX(m, map); 1825 return 0; 1826 } 1827 1828 static void 1829 pq3etsec_txq_map_unload( 1830 struct pq3etsec_softc *sc, 1831 struct pq3etsec_txqueue *txq, 1832 struct mbuf *m) 1833 { 1834 KASSERT(m); 1835 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1836 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1837 BUS_DMASYNC_POSTWRITE); 1838 bus_dmamap_unload(sc->sc_dmat, map); 1839 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1840 } 1841 1842 static bool 1843 pq3etsec_txq_produce( 1844 struct pq3etsec_softc *sc, 1845 struct pq3etsec_txqueue *txq, 1846 struct mbuf *m) 1847 { 1848 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1849 1850 if (map->dm_nsegs > txq->txq_free) 1851 return false; 1852 1853 /* 1854 * TCP Offload flag must be set in the first descriptor. 1855 */ 1856 volatile struct txbd *producer = txq->txq_producer; 1857 uint16_t last_flags = TXBD_L; 1858 uint16_t first_flags = TXBD_R 1859 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1860 1861 /* 1862 * If we've produced enough descriptors without consuming any 1863 * we need to ask for an interrupt to reclaim some. 1864 */ 1865 txq->txq_lastintr += map->dm_nsegs; 1866 if (txq->txq_lastintr >= txq->txq_threshold 1867 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1868 txq->txq_lastintr = 0; 1869 last_flags |= TXBD_I; 1870 } 1871 1872 #ifdef ETSEC_DEBUG 1873 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1874 #endif 1875 KASSERT(producer != txq->txq_last); 1876 producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1877 producer->txbd_len = map->dm_segs[0].ds_len; 1878 1879 if (map->dm_nsegs > 1) { 1880 volatile struct txbd *start = producer + 1; 1881 size_t count = map->dm_nsegs - 1; 1882 for (u_int i = 1; i < map->dm_nsegs; i++) { 1883 if (__predict_false(++producer == txq->txq_last)) { 1884 producer = txq->txq_first; 1885 if (start < txq->txq_last) { 1886 pq3etsec_txq_desc_presync(sc, txq, 1887 start, txq->txq_last - start); 1888 count -= txq->txq_last - start; 1889 } 1890 start = txq->txq_first; 1891 } 1892 #ifdef ETSEC_DEBUG 1893 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1894 #endif 1895 producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1896 producer->txbd_len = map->dm_segs[i].ds_len; 1897 producer->txbd_flags = TXBD_R 1898 | (producer->txbd_flags & TXBD_W) 1899 | (i == map->dm_nsegs - 1 ? last_flags : 0); 1900 #if 0 1901 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1902 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1903 #endif 1904 } 1905 pq3etsec_txq_desc_presync(sc, txq, start, count); 1906 } else { 1907 first_flags |= last_flags; 1908 } 1909 1910 membar_producer(); 1911 txq->txq_producer->txbd_flags = 1912 first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1913 #if 0 1914 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1915 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1916 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1917 #endif 1918 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1919 1920 /* 1921 * Reduce free count by the number of segments we consumed. 1922 */ 1923 txq->txq_free -= map->dm_nsegs; 1924 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1925 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0); 1926 KASSERT(producer->txbd_flags & TXBD_L); 1927 #ifdef ETSEC_DEBUG 1928 txq->txq_lmbufs[producer - txq->txq_first] = m; 1929 #endif 1930 1931 #if 0 1932 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n", 1933 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1934 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1935 #endif 1936 1937 if (++producer == txq->txq_last) 1938 txq->txq_producer = txq->txq_first; 1939 else 1940 txq->txq_producer = producer; 1941 IF_ENQUEUE(&txq->txq_mbufs, m); 1942 1943 /* 1944 * Restart the transmitter. 1945 */ 1946 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */ 1947 1948 return true; 1949 } 1950 1951 static void 1952 pq3etsec_tx_offload( 1953 struct pq3etsec_softc *sc, 1954 struct pq3etsec_txqueue *txq, 1955 struct mbuf **mp) 1956 { 1957 struct mbuf *m = *mp; 1958 u_int csum_flags = m->m_pkthdr.csum_flags; 1959 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m); 1960 1961 KASSERT(m->m_flags & M_PKTHDR); 1962 1963 /* 1964 * Let see if we are doing any offload first. 1965 */ 1966 if (csum_flags == 0 && vtag == 0) { 1967 m->m_flags &= ~M_HASFCB; 1968 return; 1969 } 1970 1971 uint16_t flags = 0; 1972 if (csum_flags & M_CSUM_IP) { 1973 flags |= TXFCB_IP 1974 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0) 1975 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0) 1976 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0) 1977 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0) 1978 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0); 1979 } 1980 if (vtag) { 1981 flags |= TXFCB_VLN; 1982 } 1983 if (flags == 0) { 1984 m->m_flags &= ~M_HASFCB; 1985 return; 1986 } 1987 1988 struct txfcb fcb; 1989 fcb.txfcb_flags = flags; 1990 if (csum_flags & M_CSUM_IPv4) 1991 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 1992 else 1993 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data); 1994 fcb.txfcb_l3os = ETHER_HDR_LEN; 1995 fcb.txfcb_phcs = 0; 1996 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0; 1997 1998 #if 0 1999 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n", 2000 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 2001 fcb.txfcb_phcs, fcb.txfcb_vlctl); 2002 #endif 2003 2004 if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 2005 m->m_data -= sizeof(fcb); 2006 m->m_len += sizeof(fcb); 2007 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 2008 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 2009 m->m_data = m->m_pktdat; 2010 m->m_len += sizeof(fcb); 2011 } else { 2012 struct mbuf *mn; 2013 MGET(mn, M_DONTWAIT, m->m_type); 2014 if (mn == NULL) { 2015 if (csum_flags & M_CSUM_IP4) { 2016 #ifdef INET 2017 ip_undefer_csum(m, ETHER_HDR_LEN, 2018 csum_flags & M_CSUM_IP4); 2019 #else 2020 panic("%s: impossible M_CSUM flags %#x", 2021 device_xname(sc->sc_dev), csum_flags); 2022 #endif 2023 } else if (csum_flags & M_CSUM_IP6) { 2024 #ifdef INET6 2025 ip6_undefer_csum(m, ETHER_HDR_LEN, 2026 csum_flags & M_CSUM_IP6); 2027 #else 2028 panic("%s: impossible M_CSUM flags %#x", 2029 device_xname(sc->sc_dev), csum_flags); 2030 #endif 2031 } else if (vtag) { 2032 } 2033 2034 m->m_flags &= ~M_HASFCB; 2035 return; 2036 } 2037 2038 M_MOVE_PKTHDR(mn, m); 2039 mn->m_next = m; 2040 m = mn; 2041 MH_ALIGN(m, sizeof(fcb)); 2042 m->m_len = sizeof(fcb); 2043 *mp = m; 2044 } 2045 m->m_pkthdr.len += sizeof(fcb); 2046 m->m_flags |= M_HASFCB; 2047 *mtod(m, struct txfcb *) = fcb; 2048 return; 2049 } 2050 2051 static bool 2052 pq3etsec_txq_enqueue( 2053 struct pq3etsec_softc *sc, 2054 struct pq3etsec_txqueue *txq) 2055 { 2056 for (;;) { 2057 if (IF_QFULL(&txq->txq_mbufs)) 2058 return false; 2059 struct mbuf *m = txq->txq_next; 2060 if (m == NULL) { 2061 int s = splnet(); 2062 IF_DEQUEUE(&sc->sc_if.if_snd, m); 2063 splx(s); 2064 if (m == NULL) 2065 return true; 2066 M_SETCTX(m, NULL); 2067 pq3etsec_tx_offload(sc, txq, &m); 2068 } else { 2069 txq->txq_next = NULL; 2070 } 2071 int error = pq3etsec_txq_map_load(sc, txq, m); 2072 if (error) { 2073 aprint_error_dev(sc->sc_dev, 2074 "discarded packet due to " 2075 "dmamap load failure: %d\n", error); 2076 m_freem(m); 2077 continue; 2078 } 2079 KASSERT(txq->txq_next == NULL); 2080 if (!pq3etsec_txq_produce(sc, txq, m)) { 2081 txq->txq_next = m; 2082 return false; 2083 } 2084 KASSERT(txq->txq_next == NULL); 2085 } 2086 } 2087 2088 static bool 2089 pq3etsec_txq_consume( 2090 struct pq3etsec_softc *sc, 2091 struct pq3etsec_txqueue *txq) 2092 { 2093 struct ifnet * const ifp = &sc->sc_if; 2094 volatile struct txbd *consumer = txq->txq_consumer; 2095 size_t txfree = 0; 2096 2097 #if 0 2098 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 2099 #endif 2100 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 2101 2102 for (;;) { 2103 if (consumer == txq->txq_producer) { 2104 txq->txq_consumer = consumer; 2105 txq->txq_free += txfree; 2106 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2107 #if 0 2108 printf("%s: empty: freed %zu descriptors going form %zu to %zu\n", 2109 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 2110 #endif 2111 KASSERT(txq->txq_lastintr == 0); 2112 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 2113 return true; 2114 } 2115 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2116 const uint16_t txbd_flags = consumer->txbd_flags; 2117 if (txbd_flags & TXBD_R) { 2118 txq->txq_consumer = consumer; 2119 txq->txq_free += txfree; 2120 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2121 #if 0 2122 printf("%s: freed %zu descriptors\n", 2123 __func__, txfree); 2124 #endif 2125 return pq3etsec_txq_fillable_p(sc, txq); 2126 } 2127 2128 /* 2129 * If this is the last descriptor in the chain, get the 2130 * mbuf, free its dmamap, and free the mbuf chain itself. 2131 */ 2132 if (txbd_flags & TXBD_L) { 2133 struct mbuf *m; 2134 2135 IF_DEQUEUE(&txq->txq_mbufs, m); 2136 #ifdef ETSEC_DEBUG 2137 KASSERTMSG( 2138 m == txq->txq_lmbufs[consumer-txq->txq_first], 2139 "%s: %p [%u]: flags %#x m (%p) != %p (%p)", 2140 __func__, consumer, consumer - txq->txq_first, 2141 txbd_flags, m, 2142 &txq->txq_lmbufs[consumer-txq->txq_first], 2143 txq->txq_lmbufs[consumer-txq->txq_first]); 2144 #endif 2145 KASSERT(m); 2146 pq3etsec_txq_map_unload(sc, txq, m); 2147 #if 0 2148 printf("%s: mbuf %p: consumed a %u byte packet\n", 2149 __func__, m, m->m_pkthdr.len); 2150 #endif 2151 if (m->m_flags & M_HASFCB) 2152 m_adj(m, sizeof(struct txfcb)); 2153 ifp->if_opackets++; 2154 ifp->if_obytes += m->m_pkthdr.len; 2155 if (m->m_flags & M_MCAST) 2156 ifp->if_omcasts++; 2157 if (txbd_flags & TXBD_ERRORS) 2158 ifp->if_oerrors++; 2159 m_freem(m); 2160 #ifdef ETSEC_DEBUG 2161 txq->txq_lmbufs[consumer - txq->txq_first] = NULL; 2162 #endif 2163 } else { 2164 #ifdef ETSEC_DEBUG 2165 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL); 2166 #endif 2167 } 2168 2169 /* 2170 * We own this packet again. Clear all flags except wrap. 2171 */ 2172 txfree++; 2173 //consumer->txbd_flags = txbd_flags & TXBD_W; 2174 2175 /* 2176 * Wrap at the last entry! 2177 */ 2178 if (txbd_flags & TXBD_W) { 2179 KASSERT(consumer + 1 == txq->txq_last); 2180 consumer = txq->txq_first; 2181 } else { 2182 consumer++; 2183 KASSERT(consumer < txq->txq_last); 2184 } 2185 } 2186 } 2187 2188 static void 2189 pq3etsec_txq_purge( 2190 struct pq3etsec_softc *sc, 2191 struct pq3etsec_txqueue *txq) 2192 { 2193 struct mbuf *m; 2194 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0); 2195 2196 for (;;) { 2197 IF_DEQUEUE(&txq->txq_mbufs, m); 2198 if (m == NULL) 2199 break; 2200 pq3etsec_txq_map_unload(sc, txq, m); 2201 m_freem(m); 2202 } 2203 if ((m = txq->txq_next) != NULL) { 2204 txq->txq_next = NULL; 2205 pq3etsec_txq_map_unload(sc, txq, m); 2206 m_freem(m); 2207 } 2208 #ifdef ETSEC_DEBUG 2209 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs)); 2210 #endif 2211 } 2212 2213 static void 2214 pq3etsec_txq_reset( 2215 struct pq3etsec_softc *sc, 2216 struct pq3etsec_txqueue *txq) 2217 { 2218 /* 2219 * sync all the descriptors 2220 */ 2221 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first, 2222 txq->txq_last - txq->txq_first); 2223 2224 /* 2225 * Make sure we own all descriptors in the ring. 2226 */ 2227 volatile struct txbd *txbd; 2228 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) { 2229 txbd->txbd_flags = 0; 2230 } 2231 2232 /* 2233 * Last descriptor has the wrap flag. 2234 */ 2235 txbd->txbd_flags = TXBD_W; 2236 2237 /* 2238 * Reset the producer consumer indexes. 2239 */ 2240 txq->txq_consumer = txq->txq_first; 2241 txq->txq_producer = txq->txq_first; 2242 txq->txq_free = txq->txq_last - txq->txq_first - 1; 2243 txq->txq_threshold = txq->txq_free / 2; 2244 txq->txq_lastintr = 0; 2245 2246 /* 2247 * What do we want to get interrupted on? 2248 */ 2249 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE; 2250 2251 /* 2252 * Restart the transmit at the first descriptor 2253 */ 2254 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr); 2255 } 2256 2257 static void 2258 pq3etsec_ifstart(struct ifnet *ifp) 2259 { 2260 struct pq3etsec_softc * const sc = ifp->if_softc; 2261 2262 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2263 softint_schedule(sc->sc_soft_ih); 2264 } 2265 2266 static void 2267 pq3etsec_tx_error( 2268 struct pq3etsec_softc * const sc) 2269 { 2270 struct pq3etsec_txqueue * const txq = &sc->sc_txq; 2271 2272 pq3etsec_txq_consume(sc, txq); 2273 2274 if (pq3etsec_txq_fillable_p(sc, txq)) 2275 sc->sc_if.if_flags &= ~IFF_OACTIVE; 2276 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) { 2277 } else if (sc->sc_txerrors & IEVENT_EBERR) { 2278 } 2279 2280 if (pq3etsec_txq_active_p(sc, txq)) 2281 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask); 2282 if (!pq3etsec_txq_enqueue(sc, txq)) { 2283 sc->sc_ev_tx_stall.ev_count++; 2284 sc->sc_if.if_flags |= IFF_OACTIVE; 2285 } 2286 2287 sc->sc_txerrors = 0; 2288 } 2289 2290 int 2291 pq3etsec_tx_intr(void *arg) 2292 { 2293 struct pq3etsec_softc * const sc = arg; 2294 2295 sc->sc_ev_tx_intr.ev_count++; 2296 2297 uint32_t ievent = etsec_read(sc, IEVENT); 2298 ievent &= IEVENT_TXF|IEVENT_TXB; 2299 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2300 2301 #if 0 2302 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2303 __func__, ievent, etsec_read(sc, IMASK)); 2304 #endif 2305 2306 if (ievent == 0) 2307 return 0; 2308 2309 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB); 2310 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2311 etsec_write(sc, IMASK, sc->sc_imask); 2312 softint_schedule(sc->sc_soft_ih); 2313 return 1; 2314 } 2315 2316 int 2317 pq3etsec_rx_intr(void *arg) 2318 { 2319 struct pq3etsec_softc * const sc = arg; 2320 2321 sc->sc_ev_rx_intr.ev_count++; 2322 2323 uint32_t ievent = etsec_read(sc, IEVENT); 2324 ievent &= IEVENT_RXF|IEVENT_RXB; 2325 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2326 if (ievent == 0) 2327 return 0; 2328 2329 #if 0 2330 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent); 2331 #endif 2332 2333 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB); 2334 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR); 2335 etsec_write(sc, IMASK, sc->sc_imask); 2336 softint_schedule(sc->sc_soft_ih); 2337 return 1; 2338 } 2339 2340 int 2341 pq3etsec_error_intr(void *arg) 2342 { 2343 struct pq3etsec_softc * const sc = arg; 2344 2345 sc->sc_ev_error_intr.ev_count++; 2346 2347 for (int rv = 0, soft_flags = 0;; rv = 1) { 2348 uint32_t ievent = etsec_read(sc, IEVENT); 2349 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB); 2350 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2351 if (ievent == 0) { 2352 if (soft_flags) { 2353 atomic_or_uint(&sc->sc_soft_flags, soft_flags); 2354 softint_schedule(sc->sc_soft_ih); 2355 } 2356 return rv; 2357 } 2358 #if 0 2359 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2360 __func__, ievent, etsec_read(sc, IMASK)); 2361 #endif 2362 2363 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) { 2364 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC); 2365 etsec_write(sc, IMASK, sc->sc_imask); 2366 wakeup(sc); 2367 } 2368 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) { 2369 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR); 2370 etsec_write(sc, IMASK, sc->sc_imask); 2371 wakeup(&sc->sc_mii); 2372 } 2373 if (ievent & IEVENT_BSY) { 2374 soft_flags |= SOFT_RXBSY; 2375 sc->sc_imask &= ~IEVENT_BSY; 2376 etsec_write(sc, IMASK, sc->sc_imask); 2377 } 2378 if (ievent & IEVENT_TXE) { 2379 soft_flags |= SOFT_TXERROR; 2380 sc->sc_imask &= ~IEVENT_TXE; 2381 sc->sc_txerrors |= ievent; 2382 } 2383 if (ievent & IEVENT_TXC) { 2384 sc->sc_ev_tx_pause.ev_count++; 2385 } 2386 if (ievent & IEVENT_RXC) { 2387 sc->sc_ev_rx_pause.ev_count++; 2388 } 2389 if (ievent & IEVENT_DPE) { 2390 soft_flags |= SOFT_RESET; 2391 sc->sc_imask &= ~IEVENT_DPE; 2392 etsec_write(sc, IMASK, sc->sc_imask); 2393 } 2394 } 2395 } 2396 2397 void 2398 pq3etsec_soft_intr(void *arg) 2399 { 2400 struct pq3etsec_softc * const sc = arg; 2401 struct ifnet * const ifp = &sc->sc_if; 2402 2403 mutex_enter(sc->sc_lock); 2404 2405 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 2406 2407 sc->sc_ev_soft_intr.ev_count++; 2408 2409 if (soft_flags & SOFT_RESET) { 2410 int s = splnet(); 2411 pq3etsec_ifinit(ifp); 2412 splx(s); 2413 soft_flags = 0; 2414 } 2415 2416 if (soft_flags & SOFT_RXBSY) { 2417 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq; 2418 size_t threshold = 5 * rxq->rxq_threshold / 4; 2419 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2420 threshold = rxq->rxq_last - rxq->rxq_first - 1; 2421 } else { 2422 sc->sc_imask |= IEVENT_BSY; 2423 } 2424 aprint_normal_dev(sc->sc_dev, 2425 "increasing receive buffers from %zu to %zu\n", 2426 rxq->rxq_threshold, threshold); 2427 rxq->rxq_threshold = threshold; 2428 } 2429 2430 if ((soft_flags & SOFT_TXINTR) 2431 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) { 2432 /* 2433 * Let's do what we came here for. Consume transmitted 2434 * packets off the the transmit ring. 2435 */ 2436 if (!pq3etsec_txq_consume(sc, &sc->sc_txq) 2437 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) { 2438 sc->sc_ev_tx_stall.ev_count++; 2439 ifp->if_flags |= IFF_OACTIVE; 2440 } else { 2441 ifp->if_flags &= ~IFF_OACTIVE; 2442 } 2443 sc->sc_imask |= IEVENT_TXF; 2444 } 2445 2446 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) { 2447 /* 2448 * Let's consume 2449 */ 2450 pq3etsec_rxq_consume(sc, &sc->sc_rxq); 2451 sc->sc_imask |= IEVENT_RXF; 2452 } 2453 2454 if (soft_flags & SOFT_TXERROR) { 2455 pq3etsec_tx_error(sc); 2456 sc->sc_imask |= IEVENT_TXE; 2457 } 2458 2459 if (ifp->if_flags & IFF_RUNNING) { 2460 pq3etsec_rxq_produce(sc, &sc->sc_rxq); 2461 etsec_write(sc, IMASK, sc->sc_imask); 2462 } else { 2463 KASSERT((soft_flags & SOFT_RXBSY) == 0); 2464 } 2465 2466 mutex_exit(sc->sc_lock); 2467 } 2468 2469 static void 2470 pq3etsec_mii_tick(void *arg) 2471 { 2472 struct pq3etsec_softc * const sc = arg; 2473 mutex_enter(sc->sc_lock); 2474 callout_ack(&sc->sc_mii_callout); 2475 sc->sc_ev_mii_ticks.ev_count++; 2476 #ifdef DEBUG 2477 uint64_t now = mftb(); 2478 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) { 2479 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n", 2480 __func__, now - sc->sc_mii_last_tick); 2481 callout_stop(&sc->sc_mii_callout); 2482 } 2483 #endif 2484 mii_tick(&sc->sc_mii); 2485 int s = splnet(); 2486 if (sc->sc_soft_flags & SOFT_RESET) 2487 softint_schedule(sc->sc_soft_ih); 2488 splx(s); 2489 callout_schedule(&sc->sc_mii_callout, hz); 2490 #ifdef DEBUG 2491 sc->sc_mii_last_tick = now; 2492 #endif 2493 mutex_exit(sc->sc_lock); 2494 } 2495