/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE
#define GMAC_PRIVATE

#include "locators.h"
#include "opt_broadcom.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.26 2014/02/23 21:19:06 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/if_dl.h>

#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <arm/locore.h>

#include <arm/broadcom/bcm53xx_reg.h>
#include <arm/broadcom/bcm53xx_var.h>

//#define BCMETH_MPSAFE

#ifdef BCMETH_COUNTERS
#define	BCMETH_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	BCMETH_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	BCMETH_EVCNT_INCR(a)	BCMETH_EVCNT_ADD((a), 1)

#define	BCMETH_MAXTXMBUFS	128
#define	BCMETH_NTXSEGS		30
#define	BCMETH_MAXRXMBUFS	255
#define	BCMETH_MINRXMBUFS	64
#define	BCMETH_NRXSEGS		1
#define	BCMETH_RINGSIZE		PAGE_SIZE

#if 1
#define	BCMETH_RCVMAGIC		0xfeedface
#endif

static int bcmeth_ccb_match(device_t, cfdata_t, void *);
static void bcmeth_ccb_attach(device_t, device_t, void *);

struct bcmeth_txqueue {
	bus_dmamap_t txq_descmap;
	struct gmac_txdb *txq_consumer;
	struct gmac_txdb *txq_producer;
	struct gmac_txdb *txq_first;
	struct gmac_txdb *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
	size_t txq_free;
	size_t txq_threshold;
	size_t txq_lastintr;
	bus_size_t txq_reg_xmtaddrlo;
	bus_size_t txq_reg_xmtptr;
	bus_size_t txq_reg_xmtctl;
	bus_size_t txq_reg_xmtsts0;
	bus_size_t txq_reg_xmtsts1;
	bus_dma_segment_t txq_descmap_seg;
};

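/*
 * Receive queue state.  As with the transmit queue above, rxq_first/rxq_last
 * bound the descriptor ring while rxq_producer/rxq_consumer track the
 * software producer and consumer positions; the mbufs backing the posted
 * descriptors are kept on the rxq_mhead/rxq_mtail list in ring order.
 */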
struct bcmeth_rxqueue {
	bus_dmamap_t rxq_descmap;
	struct gmac_rxdb *rxq_consumer;
	struct gmac_rxdb *rxq_producer;
	struct gmac_rxdb *rxq_first;
	struct gmac_rxdb *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
	size_t rxq_inuse;
	size_t rxq_threshold;
	bus_size_t rxq_reg_rcvaddrlo;
	bus_size_t rxq_reg_rcvptr;
	bus_size_t rxq_reg_rcvctl;
	bus_size_t rxq_reg_rcvsts0;
	bus_size_t rxq_reg_rcvsts1;
	bus_dma_segment_t rxq_descmap_seg;
};

struct bcmeth_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct bcmeth_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	kmutex_t *sc_lock;
	kmutex_t *sc_hwlock;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct ifmedia sc_media;
	void *sc_soft_ih;
	void *sc_ih;

	struct bcmeth_rxqueue sc_rxq;
	struct bcmeth_txqueue sc_txq;

	size_t sc_rcvoffset;
	uint32_t sc_macaddr[2];
	uint32_t sc_maxfrm;
	uint32_t sc_cmdcfg;
	uint32_t sc_intmask;
	uint32_t sc_rcvlazy;
	volatile uint32_t sc_soft_flags;
#define	SOFT_RXINTR		0x01
#define	SOFT_TXINTR		0x02

#ifdef BCMETH_COUNTERS
	struct evcnt sc_ev_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_work;
	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_rx_badmagic_lo;
	struct evcnt sc_ev_rx_badmagic_hi;
#endif

	struct ifqueue sc_rx_bufcache;
	struct bcmeth_mapcache *sc_rx_mapcache;
	struct bcmeth_mapcache *sc_tx_mapcache;

	struct workqueue *sc_workq;
	struct work sc_work;

	volatile uint32_t sc_work_flags;
#define	WORK_RXINTR		0x01
#define	WORK_RXUNDERFLOW	0x02
#define	WORK_REINIT		0x04

	uint8_t sc_enaddr[ETHER_ADDR_LEN];
};

static void bcmeth_ifstart(struct ifnet *);
static void bcmeth_ifwatchdog(struct ifnet *);
static int bcmeth_ifinit(struct ifnet *);
static void bcmeth_ifstop(struct ifnet *, int);
static int bcmeth_ifioctl(struct ifnet *, u_long, void *);

static int bcmeth_mapcache_create(struct bcmeth_softc *,
    struct bcmeth_mapcache **, size_t, size_t, size_t);
static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static void bcmeth_mapcache_put(struct bcmeth_softc *,
    struct bcmeth_mapcache *, bus_dmamap_t);

static int bcmeth_txq_attach(struct bcmeth_softc *,
    struct bcmeth_txqueue *, u_int);
static void bcmeth_txq_purge(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static void bcmeth_txq_reset(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_consume(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_produce(struct bcmeth_softc *,
    struct bcmeth_txqueue *, struct mbuf *m);
static bool bcmeth_txq_active_p(struct bcmeth_softc *,
    struct bcmeth_txqueue *);

static int bcmeth_rxq_attach(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, u_int);
static bool bcmeth_rxq_produce(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);
static void bcmeth_rxq_purge(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, bool);
static void bcmeth_rxq_reset(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);

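/*
 * Interrupt handling is split across three levels: the hardware interrupt
 * handler (bcmeth_intr) masks the interrupting sources and defers the real
 * work either to a software interrupt (bcmeth_soft_intr) or, when the
 * receive ring looks busy enough to monopolize a softint, to a workqueue
 * thread (bcmeth_worker).
 */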
static int bcmeth_intr(void *);
#ifdef BCMETH_MPSAFETX
static void bcmeth_soft_txintr(struct bcmeth_softc *);
#endif
static void bcmeth_soft_intr(void *);
static void bcmeth_worker(struct work *, void *);

static int bcmeth_mediachange(struct ifnet *);
static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);

static inline uint32_t
bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
}

static inline void
bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
}

CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
    bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);

static int
bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
{
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	if (strcmp(cf->cf_name, loc->loc_name))
		return 0;

#ifdef DIAGNOSTIC
	const int port = cf->cf_loc[BCMCCBCF_PORT];
#endif
	KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);

	return 1;
}

static void
bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmeth_softc * const sc = device_private(self);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
	const char * const xname = device_xname(self);
	prop_dictionary_t dict = device_properties(self);
	int error;

	sc->sc_bst = ccbaa->ccbaa_ccb_bst;
	sc->sc_dmat = ccbaa->ccbaa_dmat;
	bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	/*
	 * We need to use the coherent dma tag for the GMAC.
	 */
	sc->sc_dmat = &bcm53xx_coherent_dma_tag;
#if _ARM32_NEED_BUS_DMA_BOUNCE
	if (device_cfdata(self)->cf_flags & 2) {
		sc->sc_dmat = &bcm53xx_bounce_dma_tag;
	}
#endif

	prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
	if (eaprop == NULL) {
		uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
		uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
		if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
			aprint_error(": mac-address property is missing\n");
			return;
		}
		sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
		sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
		sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
		sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
		sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
		sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}
	sc->sc_dev = self;
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	bcmeth_write_4(sc, GMAC_INTMASK, 0);	// disable interrupts

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");

	error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		return;
	}

	error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		return;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
	    BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		return;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
	    BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		return;
	}

	error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
	    (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		aprint_error(": failed to create workqueue: %d\n", error);
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
	    bcmeth_soft_intr, sc);

	sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
	    bcmeth_intr, sc);

	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intrs[0]);
	} else {
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    loc->loc_intrs[0]);
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Since each port is plugged into the switch/flow-accelerator,
	 * we hard code it at GigE full-duplex with flow control enabled.
	 */
	int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
	//ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
	ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
	    bcmeth_mediastatus);
	ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
	ifmedia_set(&sc->sc_media, ifmedia);

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_baudrate = IF_Mbps(1000);
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef BCMETH_MPSAFE
	ifp->if_flags2 = IFF2_MPSAFE;
#endif
	ifp->if_ioctl = bcmeth_ifioctl;
	ifp->if_start = bcmeth_ifstart;
	ifp->if_watchdog = bcmeth_ifwatchdog;
	ifp->if_init = bcmeth_ifinit;
	ifp->if_stop = bcmeth_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	bcmeth_ifstop(ifp, true);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef BCMETH_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
	    NULL, xname, "work items");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stalls");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic lo");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic hi");
#endif
}

static int
bcmeth_mediachange(struct ifnet *ifp)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;
	return 0;
}

static void
bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;

	ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
}

static uint64_t
bcmeth_macaddr_create(const uint8_t *enaddr)
{
	return (enaddr[3] << 0)			// UNIMAC_MAC_0
	    | (enaddr[2] << 8)			// UNIMAC_MAC_0
	    | (enaddr[1] << 16)			// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[0] << 24)	// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[5] << 32)	// UNIMAC_MAC_1
	    | ((uint64_t)enaddr[4] << 40);	// UNIMAC_MAC_1
}

static int
bcmeth_ifinit(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface
	 */
	bcmeth_ifstop(ifp, 0);

	/*
	 * Reserve enough space at the front so that we can insert a maxsized
	 * link header and a VLAN tag.  Also make sure we have enough room for
	 * the rcvsts field as well.
	 */
	KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
	KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
	    max_linkhdr, sizeof(struct ether_header));
	sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
	if (sc->sc_rcvoffset <= 4)
		sc->sc_rcvoffset += 4;
	KASSERT((sc->sc_rcvoffset & 3) == 2);
	KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
	KASSERT(sc->sc_rcvoffset >= 6);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
		    BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
	    | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
	    | RX_ENA | TX_ENA;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_cmdcfg |= PROMISC_EN;
	} else {
		sc->sc_cmdcfg &= ~PROMISC_EN;
	}

	const uint8_t * const lladdr = CLLADDR(ifp->if_sadl);
	const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr);

	/*
	 * We make sure that a received Ethernet packet starts on a non-word
	 * boundary so that the packet payload will be on a word boundary.
	 * So to check the destination address we keep around two words to
	 * quickly compare with.
	 */
#if __ARMEL__
	sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8);
	sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8)
	    | (lladdr[4] << 16) | (lladdr[5] << 24);
#else
	sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8);
	sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8)
	    | (lladdr[3] << 16) | (lladdr[2] << 24);
#endif

	sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;

	/* 5. Load RCVADDR_LO with new pointer */
	bcmeth_rxq_reset(sc, &sc->sc_rxq);

	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
	    | RCVCTL_PARITY_DIS
	    | RCVCTL_OFLOW_CONTINUE
	    | __SHIFTIN(3, RCVCTL_BURSTLEN));

	/* 6. Load XMTADDR_LO with new pointer */
	bcmeth_txq_reset(sc, &sc->sc_txq);

	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
	    | XMTCTL_PARITY_DIS
	    | __SHIFTIN(3, XMTCTL_BURSTLEN));

	/* 7. Setup other UNIMAC registers */
	bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
	bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
	bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);

	uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
	devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
	devctl &= ~FLOW_CTRL_MODE;
	devctl &= ~MIB_RD_RESET_EN;
	devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
	devctl &= ~CPU_FLOW_CTRL_ON;
	bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);

	/* Setup lazy receive (at most 1ms). */
	const struct cpu_softc * const cpu = curcpu()->ci_softc;
	sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
	    | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT);
	bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the transmit
	 *     scheduling mode is correctly set in TCTRL.
	 */
	sc->sc_intmask |= XMTINT_0|XMTUF;
	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);

	/* 12. Enable receive queues in RQUEUE, */
	sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);

	bcmeth_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

#if 0
	aprint_normal_dev(sc->sc_dev,
	    "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
	    devctl, sc->sc_cmdcfg,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
#endif

	sc->sc_soft_flags = 0;

	bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
bcmeth_ifstop(struct ifnet *ifp, int disable)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	struct bcmeth_txqueue * const txq = &sc->sc_txq;
	struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;

	KASSERT(!cpu_intr_p());

	sc->sc_soft_flags = 0;
	sc->sc_work_flags = 0;

	/* Disable Rx processing */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
	    bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);

	/* Disable Tx processing */
	bcmeth_write_4(sc, txq->txq_reg_xmtctl,
	    bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);

	/* Disable all interrupts */
	bcmeth_write_4(sc, GMAC_INTMASK, 0);

	for (;;) {
		uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
		    && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
			break;
		delay(50);
	}
	/*
	 * Now reset the controller.
	 *
	 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 */
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
	bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
	sc->sc_intmask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
	bcmeth_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		bcmeth_txq_purge(sc, &sc->sc_txq);
		IF_PURGE(&ifp->if_snd);
	}

	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
}

static void
bcmeth_ifwatchdog(struct ifnet *ifp)
{
}

static int
bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bcmeth_softc *sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	const int s = splnet();
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			break;
		}
		error = bcmeth_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}

static void
bcmeth_rxq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_rxq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_POSTWRITE);
}

static void
bcmeth_txq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_txq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_POSTWRITE);
}

static bus_dmamap_t
bcmeth_mapcache_get(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
bcmeth_mapcache_put(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
bcmeth_mapcache_destroy(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

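/*
 * The map cache pre-creates a fixed number of DMA maps and hands them out
 * LIFO-fashion from dmc_maps[], so the rx/tx hot paths never have to call
 * bus_dmamap_create() themselves.
 */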
static int
bcmeth_mapcache_create(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
	struct bcmeth_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	if (dmc == NULL)
		return ENOMEM;

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
bcmeth_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
bcmeth_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

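/*
 * Allocate an rx mbuf cluster, pair it with a DMA map from the rx map
 * cache, and (when BCMETH_RCVMAGIC is defined) seed the spot where the
 * hardware will write the receive status word with a magic value so that
 * descriptors the hardware advanced past without writing back can be
 * detected later in bcmeth_rxq_consume().
 */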
static struct mbuf *
bcmeth_rx_buf_alloc(
	struct bcmeth_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
#ifdef BCMETH_RCVMAGIC
	*mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC);
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#else
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#endif

	return m;
}

static void
bcmeth_rx_map_unload(
	struct bcmeth_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
bcmeth_rxq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	struct gmac_rxdb *producer = rxq->rxq_producer;
	bool produced = false;

	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = bcmeth_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

		producer->rxdb_buflen = htole32(MCLBYTES);
		producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr);
		producer->rxdb_flags &= htole32(RXDB_FLAG_ET);
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
		produced = true;
	}
	if (produced) {
		membar_producer();
		if (producer != rxq->rxq_producer) {
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    producer - rxq->rxq_producer);
			rxq->rxq_producer = producer;
		}
		bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
		    rxq->rxq_descmap->dm_segs[0].ds_addr
		    + ((uintptr_t)producer & RCVPTR));
	}
	return true;
}

static void
bcmeth_rx_input(
	struct bcmeth_softc *sc,
	struct mbuf *m,
	uint32_t rxdb_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	bcmeth_rx_map_unload(sc, m);

	m_adj(m, sc->sc_rcvoffset);

	/*
	 * If we are in promiscuous mode and this isn't a multicast, check the
	 * destination address to make sure it matches our own.  If it doesn't,
	 * mark the packet as being received promiscuously.
	 */
	if ((sc->sc_cmdcfg & PROMISC_EN)
	    && (m->m_data[0] & 1) == 0
	    && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0]
		|| *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) {
		m->m_flags |= M_PROMISC;
	}
	m->m_pkthdr.rcvif = ifp;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
#ifdef BCMETH_MPSAFE
	mutex_exit(sc->sc_lock);
	(*ifp->if_input)(ifp, m);
	mutex_enter(sc->sc_lock);
#else
	int s = splnet();
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	splx(s);
#endif
}

static bool
bcmeth_rxq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	size_t atmost)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_rxdb *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;
	bool didconsume = false;

	while (atmost-- > 0) {
		if (consumer == rxq->rxq_producer) {
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}

		uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
		if (consumer == rxq->rxq_first + currdscr) {
			break;
		}
		bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);

		/*
		 * We own this packet again.  Copy the rxsts word from it.
		 */
		rxconsumed++;
		didconsume = true;
		uint32_t rxsts;
		KASSERT(rxq->rxq_mhead != NULL);
		bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
		bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
		    BUS_DMASYNC_POSTREAD);
		memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
		rxsts = le32toh(rxsts);
#if 0
		KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
		    currdscr, consumer - rxq->rxq_first);
#endif

		/*
		 * Get the count of descriptors.  Fetch the correct number
		 * of mbufs.
		 */
#ifdef BCMETH_RCVMAGIC
		size_t desc_count = rxsts != BCMETH_RCVMAGIC
		    ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
#else
		size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
#endif
		struct mbuf *m = rxq->rxq_mhead;
		struct mbuf *m_last = m;
		for (size_t i = 1; i < desc_count; i++) {
			if (++consumer == rxq->rxq_last) {
				consumer = rxq->rxq_first;
			}
			KASSERTMSG(consumer != rxq->rxq_first + currdscr,
			    "i=%zu rxsts=%#x desc_count=%zu currdscr=%u consumer=%zd",
			    i, rxsts, desc_count, currdscr,
			    consumer - rxq->rxq_first);
			m_last = m_last->m_next;
		}

		/*
		 * Now remove it/them from the list of enqueued mbufs.
		 */
		if ((rxq->rxq_mhead = m_last->m_next) == NULL)
			rxq->rxq_mtail = &rxq->rxq_mhead;
		m_last->m_next = NULL;

#ifdef BCMETH_RCVMAGIC
		if (rxsts == BCMETH_RCVMAGIC) {
			ifp->if_ierrors++;
			if ((m->m_ext.ext_paddr >> 28) == 8) {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
			} else {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
			}
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else
#endif /* BCMETH_RCVMAGIC */
		if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
			aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
			    consumer - rxq->rxq_first, desc_count, rxsts);
			/*
			 * We encountered an error, take the mbufs and add them
			 * to the rx bufcache so we can quickly reuse them.
			 */
			ifp->if_ierrors++;
			do {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			} while (m);
		} else {
			uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
			framelen += sc->sc_rcvoffset;
			m->m_pkthdr.len = framelen;
			if (desc_count == 1) {
				KASSERT(framelen <= MCLBYTES);
				m->m_len = framelen;
			} else {
				m_last->m_len = framelen & (MCLBYTES - 1);
			}

#ifdef BCMETH_MPSAFE
			/*
			 * Wrap at the last entry!
			 */
			if (++consumer == rxq->rxq_last) {
				KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET));
				rxq->rxq_consumer = rxq->rxq_first;
			} else {
				rxq->rxq_consumer = consumer;
			}
			rxq->rxq_inuse -= rxconsumed;
#endif /* BCMETH_MPSAFE */

			/*
			 * Receive the packet (which releases our lock)
			 */
			bcmeth_rx_input(sc, m, rxsts);

#ifdef BCMETH_MPSAFE
			/*
			 * Since we had to give up our lock, we need to
			 * refresh these.
			 */
			consumer = rxq->rxq_consumer;
			rxconsumed = 0;
			continue;
#endif /* BCMETH_MPSAFE */
		}

		/*
		 * Wrap at the last entry!
		 */
		if (++consumer == rxq->rxq_last) {
			KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET));
			consumer = rxq->rxq_first;
		}
	}

	/*
	 * Update queue info.
	 */
	rxq->rxq_consumer = consumer;
	rxq->rxq_inuse -= rxconsumed;

	/*
	 * Did we consume anything?
	 */
	return didconsume;
}

static void
bcmeth_rxq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
		if (discard) {
			bcmeth_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}
	}

	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

static void
bcmeth_rxq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	/*
	 * sync all the descriptors
	 */
	bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_rxdb *rxdb;
	for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
		rxdb->rxdb_flags = htole32(RXDB_FLAG_IC);
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxdb->rxdb_flags = htole32(RXDB_FLAG_ET|RXDB_FLAG_IC);

	/*
	 * Reset the producer consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
		rxq->rxq_threshold = BCMETH_MINRXMBUFS;

	sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;

	/*
	 * Restart the receiver at the first descriptor
	 */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
	    rxq->rxq_descmap->dm_segs[0].ds_addr);
}

static int
bcmeth_rxq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	bcmeth_rxq_purge(sc, rxq, true);
	bcmeth_rxq_reset(sc, rxq);

	rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
	rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
	rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
	rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
	rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;

	return 0;
}

static bool
bcmeth_txq_active_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
bcmeth_txq_fillable_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
bcmeth_txq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);

	txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
	txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
	txq->txq_reg_xmtptr = GMAC_XMTPTR;
	txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
	txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;

	bcmeth_txq_reset(sc, txq);

	return 0;
}

static int
bcmeth_txq_map_load(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map = M_GETCTX(m, bus_dmamap_t);
	if (map != NULL)
		return 0;

	map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map == NULL)
		return ENOMEM;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error)
		return error;

	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	M_SETCTX(m, map);
	return 0;
}

static void
bcmeth_txq_map_unload(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	KASSERT(m);
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
	bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
}

static bool
bcmeth_txq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	struct gmac_txdb *producer = txq->txq_producer;
	uint32_t first_flags = TXDB_FLAG_SF;
	uint32_t last_flags = TXDB_FLAG_EF;

	/*
	 * If we've produced enough descriptors without consuming any
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXDB_FLAG_IC;
	}

	KASSERT(producer != txq->txq_last);

	struct gmac_txdb *start = producer;
	size_t count = map->dm_nsegs;
	producer->txdb_flags |= htole32(first_flags);
	producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr);
	producer->txdb_buflen = htole32(map->dm_segs[0].ds_len);
	for (u_int i = 1; i < map->dm_nsegs; i++) {
#if 0
		printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
		    le32toh(producer->txdb_flags),
		    le32toh(producer->txdb_buflen),
		    le32toh(producer->txdb_addrlo),
		    le32toh(producer->txdb_addrhi));
#endif
		if (__predict_false(++producer == txq->txq_last)) {
			bcmeth_txq_desc_presync(sc, txq, start,
			    txq->txq_last - start);
			count -= txq->txq_last - start;
			producer = txq->txq_first;
			start = txq->txq_first;
		}
		producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr);
		producer->txdb_buflen = htole32(map->dm_segs[i].ds_len);
	}
	producer->txdb_flags |= htole32(last_flags);
#if 0
	printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
	    le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen),
	    le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi));
#endif
	if (count)
		bcmeth_txq_desc_presync(sc, txq, start, count);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1
	    || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0);
	KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF));

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (producer + 1 == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer + 1;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Let the transmitter know there's more to do
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtptr,
	    txq->txq_descmap->dm_segs[0].ds_addr
	    + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));

	return true;
}

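/*
 * Consolidate a multi-mbuf packet into a single mbuf (cluster-backed when
 * it does not fit in a header mbuf) so the transmit DMA sees fewer,
 * better-aligned segments.  Only used when LINK2 is set on the interface
 * (see bcmeth_txq_enqueue below).
 */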
static struct mbuf *
bcmeth_copy_packet(struct mbuf *m)
{
	struct mbuf *mext = NULL;
	size_t misalignment = 0;
	size_t hlen = 0;

	for (mext = m; mext != NULL; mext = mext->m_next) {
		if (mext->m_flags & M_EXT) {
			misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
			break;
		}
		hlen += m->m_len;
	}

	struct mbuf *n = m->m_next;
	if (m != mext && hlen + misalignment <= MHLEN && false) {
		KASSERT(m->m_pktdat <= m->m_data
		    && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
		size_t oldoff = m->m_data - m->m_pktdat;
		size_t off;
		if (mext == NULL) {
			off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
		} else {
			off = MHLEN - (hlen + misalignment);
		}
		KASSERT(off + hlen + misalignment <= MHLEN);
		if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
			memmove(&m->m_pktdat[off], m->m_data, m->m_len);
			m->m_data = &m->m_pktdat[off];
		}
		m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
		m->m_len = hlen;
		m->m_next = mext;
		while (n != mext) {
			n = m_free(n);
		}
		return m;
	}

	struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
	if (m0 == NULL) {
		return NULL;
	}
	M_COPY_PKTHDR(m0, m);
	MCLAIM(m0, m->m_owner);
	if (m0->m_pkthdr.len > MHLEN) {
		MCLGET(m0, M_DONTWAIT);
		if ((m0->m_flags & M_EXT) == 0) {
			m_freem(m0);
			return NULL;
		}
	}
	m0->m_len = m->m_pkthdr.len;
	m_copydata(m, 0, m0->m_len, mtod(m0, void *));
	m_freem(m);
	return m0;
}

static bool
bcmeth_txq_enqueue(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
		} else {
			txq->txq_next = NULL;
		}
		/*
		 * If LINK2 is set and this packet uses multiple mbufs,
		 * consolidate it into a single mbuf.
		 */
		if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
			struct mbuf *m0 = bcmeth_copy_packet(m);
			if (m0 == NULL) {
				txq->txq_next = m;
				return true;
			}
			m = m0;
		}
		int error = bcmeth_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!bcmeth_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}

static bool
bcmeth_txq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_txdb *consumer = txq->txq_consumer;
	size_t txfree = 0;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif

	for (;;) {
		if (consumer == txq->txq_producer) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
			    __func__, txfree, txq->txq_free - txfree, txq->txq_free);
#endif
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free ==
			    txq->txq_last - txq->txq_first - 1);
			return true;
		}
		bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
		uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: freed %zu descriptors\n",
			    __func__, txfree);
#endif
			return bcmeth_txq_fillable_p(sc, txq);
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		const uint32_t txdb_flags = le32toh(consumer->txdb_flags);
		if (txdb_flags & TXDB_FLAG_EF) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
			KASSERT(m);
			bcmeth_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			bpf_mtap(ifp, m);
			ifp->if_opackets++;
			ifp->if_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				ifp->if_omcasts++;
			m_freem(m);
		}

		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		txfree++;

		/*
		 * Wrap at the last entry!
		 */
		if (txdb_flags & TXDB_FLAG_ET) {
			consumer->txdb_flags = htole32(TXDB_FLAG_ET);
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer->txdb_flags = 0;
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}
}

static void
bcmeth_txq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		bcmeth_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		bcmeth_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
}

static void
bcmeth_txq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq)
{
	/*
	 * sync all the descriptors
	 */
	bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_txdb *txdb;
	for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
		txdb->txdb_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txdb->txdb_flags = htole32(TXDB_FLAG_ET);

	/*
	 * Reset the producer consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_intmask |= XMTINT_0 | XMTUF;

	/*
	 * Restart the transmitter at the first descriptor
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
	    txq->txq_descmap->dm_segs->ds_addr);
}

static void
bcmeth_ifstart(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;

	if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
		return;
	}

#ifdef BCMETH_MPSAFETX
	if (cpu_intr_p()) {
#endif
		atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
		softint_schedule(sc->sc_soft_ih);
#ifdef BCMETH_MPSAFETX
	} else {
		/*
		 * Either we are in a softintr thread already or some other
		 * thread so just borrow it to do the send and save ourselves
		 * the overhead of a fast soft int.
		 */
		bcmeth_soft_txintr(sc);
	}
#endif
}

int
bcmeth_intr(void *arg)
{
	struct bcmeth_softc * const sc = arg;
	uint32_t soft_flags = 0;
	uint32_t work_flags = 0;
	int rv = 0;

	mutex_enter(sc->sc_hwlock);

	uint32_t intmask = sc->sc_intmask;
	BCMETH_EVCNT_INCR(sc->sc_ev_intr);

	for (;;) {
		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
		intstatus &= intmask;
		bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus);	/* write 1 to clear */
		if (intstatus == 0) {
			break;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
		    __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
#endif
		if (intstatus & RCVINT) {
			struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
			intmask &= ~RCVINT;

			uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
			uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
			if (descs < rxq->rxq_consumer - rxq->rxq_first) {
				/*
				 * We wrapped at the end so count how far
				 * we are from the end.
				 */
				descs += rxq->rxq_last - rxq->rxq_consumer;
			} else {
				descs -= rxq->rxq_consumer - rxq->rxq_first;
			}
			/*
			 * If we "timed out" we can't be hogging the CPU, so
			 * use softints.  If we exceeded the frame count then
			 * we might be hogging, so let the workqueue deal
			 * with them.
			 */
			const uint32_t framecount =
			    __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT);
			if (descs < framecount
			    || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
				soft_flags |= SOFT_RXINTR;
			} else {
				work_flags |= WORK_RXINTR;
			}
		}

		if (intstatus & XMTINT_0) {
			intmask &= ~XMTINT_0;
			soft_flags |= SOFT_TXINTR;
		}

		if (intstatus & RCVDESCUF) {
			intmask &= ~RCVDESCUF;
			work_flags |= WORK_RXUNDERFLOW;
		}

		intstatus &= intmask;
		if (intstatus) {
			aprint_error_dev(sc->sc_dev,
			    "intr: intstatus=%#x\n", intstatus);
			aprint_error_dev(sc->sc_dev,
			    "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
			    sc->sc_rxq.rxq_first,
			    sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
			    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
			aprint_error_dev(sc->sc_dev,
			    "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
			    sc->sc_txq.txq_first,
			    sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
			    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
			intmask &= ~intstatus;
			work_flags |= WORK_REINIT;
			break;
		}
	}

	if (intmask != sc->sc_intmask) {
		sc->sc_intmask = intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
	}

	if (work_flags) {
		if (sc->sc_work_flags == 0) {
			workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
		}
		atomic_or_32(&sc->sc_work_flags, work_flags);
		rv = 1;
	}

	if (soft_flags) {
		if (sc->sc_soft_flags == 0) {
			softint_schedule(sc->sc_soft_ih);
		}
		atomic_or_32(&sc->sc_soft_flags, soft_flags);
		rv = 1;
	}

	mutex_exit(sc->sc_hwlock);

	return rv;
}

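/*
 * The softint and workqueue handlers below do the deferred rx/tx work and
 * then re-enable, under the hardware lock, the interrupt sources that
 * bcmeth_intr() masked off while the work was pending.
 */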
#ifdef BCMETH_MPSAFETX
void
bcmeth_soft_txintr(struct bcmeth_softc *sc)
{
	mutex_enter(sc->sc_lock);
	/*
	 * Let's do what we came here for.  Consume transmitted
	 * packets off the transmit ring.
	 */
	if (!bcmeth_txq_consume(sc, &sc->sc_txq)
	    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
		BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
		sc->sc_if.if_flags |= IFF_OACTIVE;
	} else {
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
	}
	if (sc->sc_if.if_flags & IFF_RUNNING) {
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= XMTINT_0;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}
	mutex_exit(sc->sc_lock);
}
#endif /* BCMETH_MPSAFETX */

void
bcmeth_soft_intr(void *arg)
{
	struct bcmeth_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t intmask = 0;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);

	if ((soft_flags & SOFT_TXINTR)
	    || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!bcmeth_txq_consume(sc, &sc->sc_txq)
		    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
			BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
			ifp->if_flags |= IFF_OACTIVE;
		} else {
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		intmask |= XMTINT_0;
	}

	if (soft_flags & SOFT_RXINTR) {
		/*
		 * Let's consume
		 */
		while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
		    sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed a quarter of the ring and still have
			 * more to do.  Refill the ring.
			 */
			bcmeth_rxq_produce(sc, &sc->sc_rxq);
		}
		intmask |= RCVINT;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		bcmeth_rxq_produce(sc, &sc->sc_rxq);
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}

	mutex_exit(sc->sc_lock);
}

void
bcmeth_worker(struct work *wk, void *arg)
{
	struct bcmeth_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t intmask = 0;

	mutex_enter(sc->sc_lock);

	BCMETH_EVCNT_INCR(sc->sc_ev_work);

	uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
	if (work_flags & WORK_REINIT) {
		int s = splnet();
		sc->sc_soft_flags = 0;
		bcmeth_ifinit(ifp);
		splx(s);
		work_flags &= ~WORK_RXUNDERFLOW;
	}

	if (work_flags & WORK_RXUNDERFLOW) {
		struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			intmask |= RCVDESCUF;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}

	if (work_flags & WORK_RXINTR) {
		/*
		 * Let's consume
		 */
		while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
		    sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed a quarter of the ring and still have
			 * more to do.  Refill the ring.
			 */
			bcmeth_rxq_produce(sc, &sc->sc_rxq);
		}
		intmask |= RCVINT;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		bcmeth_rxq_produce(sc, &sc->sc_rxq);
#if 0
		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
		if (intstatus & RCVINT) {
			bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
			work_flags |= WORK_RXINTR;
			continue;
		}
#endif
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_intmask |= intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
		mutex_spin_exit(sc->sc_hwlock);
	}

	mutex_exit(sc->sc_lock);
}