1 /*- 2 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas of 3am Software Foundry. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #define _ARM32_BUS_DMA_PRIVATE 31 #define GMAC_PRIVATE 32 33 #include "locators.h" 34 #include "opt_broadcom.h" 35 36 #include <sys/cdefs.h> 37 38 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.42 2022/09/17 19:41:18 thorpej Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/atomic.h> 42 #include <sys/bus.h> 43 #include <sys/device.h> 44 #include <sys/ioctl.h> 45 #include <sys/intr.h> 46 #include <sys/kmem.h> 47 #include <sys/mutex.h> 48 #include <sys/socket.h> 49 #include <sys/systm.h> 50 #include <sys/workqueue.h> 51 52 #include <net/if.h> 53 #include <net/if_ether.h> 54 #include <net/if_media.h> 55 #include <net/if_dl.h> 56 #include <net/bpf.h> 57 58 #include <dev/mii/miivar.h> 59 60 #include <arm/locore.h> 61 62 #include <arm/broadcom/bcm53xx_reg.h> 63 #include <arm/broadcom/bcm53xx_var.h> 64 65 //#define BCMETH_MPSAFE 66 67 #ifdef BCMETH_COUNTERS 68 #define BCMETH_EVCNT_ADD(a, b) ((void)((a).ev_count += (b))) 69 #else 70 #define BCMETH_EVCNT_ADD(a, b) do { } while (/*CONSTCOND*/0) 71 #endif 72 #define BCMETH_EVCNT_INCR(a) BCMETH_EVCNT_ADD((a), 1) 73 74 #define BCMETH_MAXTXMBUFS 128 75 #define BCMETH_NTXSEGS 30 76 #define BCMETH_MAXRXMBUFS 255 77 #define BCMETH_MINRXMBUFS 64 78 #define BCMETH_NRXSEGS 1 79 #define BCMETH_RINGSIZE PAGE_SIZE 80 81 #if 1 82 #define BCMETH_RCVMAGIC 0xfeedface 83 #endif 84 85 static int bcmeth_ccb_match(device_t, cfdata_t, void *); 86 static void bcmeth_ccb_attach(device_t, device_t, void *); 87 88 struct bcmeth_txqueue { 89 bus_dmamap_t txq_descmap; 90 struct gmac_txdb *txq_consumer; 91 struct gmac_txdb *txq_producer; 92 struct gmac_txdb *txq_first; 93 struct gmac_txdb *txq_last; 94 struct ifqueue txq_mbufs; 95 struct mbuf *txq_next; 96 size_t txq_free; 97 size_t txq_threshold; 98 size_t txq_lastintr; 99 bus_size_t txq_reg_xmtaddrlo; 100 bus_size_t txq_reg_xmtptr; 101 bus_size_t txq_reg_xmtctl; 102 bus_size_t txq_reg_xmtsts0; 103 bus_size_t txq_reg_xmtsts1; 104 bus_dma_segment_t txq_descmap_seg; 105 }; 106 107 struct bcmeth_rxqueue { 108 bus_dmamap_t rxq_descmap; 109 struct gmac_rxdb *rxq_consumer; 110 struct gmac_rxdb *rxq_producer; 111 struct gmac_rxdb *rxq_first; 112 struct gmac_rxdb *rxq_last; 113 struct mbuf *rxq_mhead; 114 struct mbuf **rxq_mtail; 115 struct mbuf *rxq_mconsumer; 116 size_t rxq_inuse; 117 size_t rxq_threshold; 118 bus_size_t rxq_reg_rcvaddrlo; 119 bus_size_t rxq_reg_rcvptr; 120 bus_size_t rxq_reg_rcvctl; 121 bus_size_t rxq_reg_rcvsts0; 122 bus_size_t rxq_reg_rcvsts1; 123 bus_dma_segment_t rxq_descmap_seg; 124 }; 125 126 struct bcmeth_mapcache { 127 u_int dmc_nmaps; 128 u_int dmc_maxseg; 129 u_int dmc_maxmaps; 130 u_int dmc_maxmapsize; 131 bus_dmamap_t dmc_maps[0]; 132 }; 133 134 struct bcmeth_softc { 135 device_t sc_dev; 136 bus_space_tag_t sc_bst; 137 bus_space_handle_t sc_bsh; 138 bus_dma_tag_t sc_dmat; 139 kmutex_t *sc_lock; 140 kmutex_t *sc_hwlock; 141 struct ethercom sc_ec; 142 #define sc_if sc_ec.ec_if 143 struct ifmedia sc_media; 144 void *sc_soft_ih; 145 void *sc_ih; 146 147 struct bcmeth_rxqueue sc_rxq; 148 struct bcmeth_txqueue sc_txq; 149 150 size_t sc_rcvoffset; 151 uint32_t sc_macaddr[2]; 152 uint32_t sc_maxfrm; 153 uint32_t sc_cmdcfg; 154 uint32_t sc_intmask; 155 uint32_t sc_rcvlazy; 156 volatile uint32_t sc_soft_flags; 157 #define SOFT_RXINTR 0x01 158 #define SOFT_TXINTR 0x02 159 160 #ifdef BCMETH_COUNTERS 161 struct evcnt sc_ev_intr; 162 struct evcnt sc_ev_soft_intr; 163 struct evcnt sc_ev_work; 164 struct evcnt sc_ev_tx_stall; 165 struct evcnt sc_ev_rx_badmagic_lo; 166 struct evcnt sc_ev_rx_badmagic_hi; 167 #endif 168 169 struct ifqueue sc_rx_bufcache; 170 struct bcmeth_mapcache *sc_rx_mapcache; 171 struct bcmeth_mapcache *sc_tx_mapcache; 172 173 struct workqueue *sc_workq; 174 struct work sc_work; 175 176 volatile uint32_t sc_work_flags; 177 #define WORK_RXINTR 0x01 178 #define WORK_RXUNDERFLOW 0x02 179 #define WORK_REINIT 0x04 180 181 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 182 }; 183 184 static void bcmeth_ifstart(struct ifnet *); 185 static void bcmeth_ifwatchdog(struct ifnet *); 186 static int bcmeth_ifinit(struct ifnet *); 187 static void bcmeth_ifstop(struct ifnet *, int); 188 static int bcmeth_ifioctl(struct ifnet *, u_long, void *); 189 190 static int bcmeth_mapcache_create(struct bcmeth_softc *, 191 struct bcmeth_mapcache **, size_t, size_t, size_t); 192 static void bcmeth_mapcache_destroy(struct bcmeth_softc *, 193 struct bcmeth_mapcache *); 194 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *, 195 struct bcmeth_mapcache *); 196 static void bcmeth_mapcache_put(struct bcmeth_softc *, 197 struct bcmeth_mapcache *, bus_dmamap_t); 198 199 static int bcmeth_txq_attach(struct bcmeth_softc *, 200 struct bcmeth_txqueue *, u_int); 201 static void bcmeth_txq_purge(struct bcmeth_softc *, 202 struct bcmeth_txqueue *); 203 static void bcmeth_txq_reset(struct bcmeth_softc *, 204 struct bcmeth_txqueue *); 205 static bool bcmeth_txq_consume(struct bcmeth_softc *, 206 struct bcmeth_txqueue *); 207 static bool bcmeth_txq_produce(struct bcmeth_softc *, 208 struct bcmeth_txqueue *, struct mbuf *m); 209 static bool bcmeth_txq_active_p(struct bcmeth_softc *, 210 struct bcmeth_txqueue *); 211 212 static int bcmeth_rxq_attach(struct bcmeth_softc *, 213 struct bcmeth_rxqueue *, u_int); 214 static bool bcmeth_rxq_produce(struct bcmeth_softc *, 215 struct bcmeth_rxqueue *); 216 static void bcmeth_rxq_purge(struct bcmeth_softc *, 217 struct bcmeth_rxqueue *, bool); 218 static void bcmeth_rxq_reset(struct bcmeth_softc *, 219 struct bcmeth_rxqueue *); 220 221 static int bcmeth_intr(void *); 222 #ifdef BCMETH_MPSAFETX 223 static void bcmeth_soft_txintr(struct bcmeth_softc *); 224 #endif 225 static void bcmeth_soft_intr(void *); 226 static void bcmeth_worker(struct work *, void *); 227 228 static int bcmeth_mediachange(struct ifnet *); 229 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *); 230 231 static inline uint32_t 232 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o) 233 { 234 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o); 235 } 236 237 static inline void 238 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v) 239 { 240 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v); 241 } 242 243 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc), 244 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL); 245 246 static int 247 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux) 248 { 249 struct bcmccb_attach_args * const ccbaa = aux; 250 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc; 251 252 if (strcmp(cf->cf_name, loc->loc_name)) 253 return 0; 254 255 #ifdef DIAGNOSTIC 256 const int port = cf->cf_loc[BCMCCBCF_PORT]; 257 #endif 258 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port); 259 260 return 1; 261 } 262 263 static void 264 bcmeth_ccb_attach(device_t parent, device_t self, void *aux) 265 { 266 struct bcmeth_softc * const sc = device_private(self); 267 struct ethercom * const ec = &sc->sc_ec; 268 struct ifnet * const ifp = &ec->ec_if; 269 struct bcmccb_attach_args * const ccbaa = aux; 270 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc; 271 const char * const xname = device_xname(self); 272 prop_dictionary_t dict = device_properties(self); 273 int error; 274 275 sc->sc_bst = ccbaa->ccbaa_ccb_bst; 276 sc->sc_dmat = ccbaa->ccbaa_dmat; 277 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh, 278 loc->loc_offset, loc->loc_size, &sc->sc_bsh); 279 280 /* 281 * We need to use the coherent dma tag for the GMAC. 282 */ 283 sc->sc_dmat = &bcm53xx_coherent_dma_tag; 284 #if _ARM32_NEED_BUS_DMA_BOUNCE 285 if (device_cfdata(self)->cf_flags & 2) { 286 sc->sc_dmat = &bcm53xx_bounce_dma_tag; 287 } 288 #endif 289 290 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address"); 291 if (eaprop == NULL) { 292 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0); 293 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1); 294 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) { 295 aprint_error(": mac-address property is missing\n"); 296 return; 297 } 298 sc->sc_enaddr[0] = (mac0 >> 0) & 0xff; 299 sc->sc_enaddr[1] = (mac0 >> 8) & 0xff; 300 sc->sc_enaddr[2] = (mac0 >> 16) & 0xff; 301 sc->sc_enaddr[3] = (mac0 >> 24) & 0xff; 302 sc->sc_enaddr[4] = (mac1 >> 0) & 0xff; 303 sc->sc_enaddr[5] = (mac1 >> 8) & 0xff; 304 } else { 305 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA); 306 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN); 307 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop), 308 ETHER_ADDR_LEN); 309 } 310 sc->sc_dev = self; 311 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 312 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 313 314 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts 315 316 aprint_naive("\n"); 317 aprint_normal(": Gigabit Ethernet Controller\n"); 318 319 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0); 320 if (error) { 321 aprint_error(": failed to init rxq: %d\n", error); 322 goto fail_1; 323 } 324 325 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0); 326 if (error) { 327 aprint_error(": failed to init txq: %d\n", error); 328 goto fail_1; 329 } 330 331 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache, 332 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS); 333 if (error) { 334 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 335 goto fail_1; 336 } 337 338 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache, 339 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS); 340 if (error) { 341 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 342 goto fail_1; 343 } 344 345 error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc, 346 (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU); 347 if (error) { 348 aprint_error(": failed to create workqueue: %d\n", error); 349 goto fail_2; 350 } 351 352 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET, 353 bcmeth_soft_intr, sc); 354 355 if (sc->sc_ih == NULL) { 356 aprint_error_dev(self, "failed to establish interrupt %d\n", 357 loc->loc_intrs[0]); 358 goto fail_3; 359 } 360 361 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL, 362 bcmeth_intr, sc); 363 364 if (sc->sc_ih == NULL) { 365 aprint_error_dev(self, "failed to establish interrupt %d\n", 366 loc->loc_intrs[0]); 367 goto fail_4; 368 } else { 369 aprint_normal_dev(self, "interrupting on irq %d\n", 370 loc->loc_intrs[0]); 371 } 372 373 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 374 ether_sprintf(sc->sc_enaddr)); 375 376 /* 377 * Since each port in plugged into the switch/flow-accelerator, 378 * we hard code at Gige Full-Duplex with Flow Control enabled. 379 */ 380 int ifmedia = IFM_ETHER | IFM_1000_T | IFM_FDX; 381 //ifmedia |= IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 382 ec->ec_ifmedia = &sc->sc_media; 383 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange, 384 bcmeth_mediastatus); 385 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL); 386 ifmedia_set(&sc->sc_media, ifmedia); 387 388 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; 389 390 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 391 ifp->if_softc = sc; 392 ifp->if_baudrate = IF_Mbps(1000); 393 ifp->if_capabilities = 0; 394 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 395 #ifdef BCMETH_MPSAFE 396 ifp->if_flags2 = IFF2_MPSAFE; 397 #endif 398 ifp->if_ioctl = bcmeth_ifioctl; 399 ifp->if_start = bcmeth_ifstart; 400 ifp->if_watchdog = bcmeth_ifwatchdog; 401 ifp->if_init = bcmeth_ifinit; 402 ifp->if_stop = bcmeth_ifstop; 403 IFQ_SET_READY(&ifp->if_snd); 404 405 bcmeth_ifstop(ifp, true); 406 407 /* 408 * Attach the interface. 409 */ 410 if_initialize(ifp); 411 ether_ifattach(ifp, sc->sc_enaddr); 412 if_register(ifp); 413 414 #ifdef BCMETH_COUNTERS 415 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 416 NULL, xname, "intr"); 417 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 418 NULL, xname, "soft intr"); 419 evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC, 420 NULL, xname, "work items"); 421 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 422 NULL, xname, "tx stalls"); 423 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC, 424 NULL, xname, "rx badmagic lo"); 425 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC, 426 NULL, xname, "rx badmagic hi"); 427 #endif 428 429 return; 430 431 fail_4: 432 intr_disestablish(sc->sc_ih); 433 fail_3: 434 softint_disestablish(sc->sc_soft_ih); 435 fail_2: 436 workqueue_destroy(sc->sc_workq); 437 fail_1: 438 mutex_obj_free(sc->sc_lock); 439 mutex_obj_free(sc->sc_hwlock); 440 } 441 442 static int 443 bcmeth_mediachange(struct ifnet *ifp) 444 { 445 //struct bcmeth_softc * const sc = ifp->if_softc; 446 return 0; 447 } 448 449 static void 450 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm) 451 { 452 //struct bcmeth_softc * const sc = ifp->if_softc; 453 454 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE; 455 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T; 456 } 457 458 static uint64_t 459 bcmeth_macaddr_create(const uint8_t *enaddr) 460 { 461 return (enaddr[3] << 0) // UNIMAC_MAC_0 462 | (enaddr[2] << 8) // UNIMAC_MAC_0 463 | (enaddr[1] << 16) // UNIMAC_MAC_0 464 | ((uint64_t)enaddr[0] << 24) // UNIMAC_MAC_0 465 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1 466 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1 467 } 468 469 static int 470 bcmeth_ifinit(struct ifnet *ifp) 471 { 472 struct bcmeth_softc * const sc = ifp->if_softc; 473 int error = 0; 474 475 sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES); 476 if (ifp->if_mtu > ETHERMTU_JUMBO) 477 return error; 478 479 KASSERT(ifp->if_flags & IFF_UP); 480 481 /* 482 * Stop the interface 483 */ 484 bcmeth_ifstop(ifp, 0); 485 486 /* 487 * Reserve enough space at the front so that we can insert a maxsized 488 * link header and a VLAN tag. Also make sure we have enough room for 489 * the rcvsts field as well. 490 */ 491 KASSERT(ALIGN(max_linkhdr) == max_linkhdr); 492 KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu", 493 max_linkhdr, sizeof(struct ether_header)); 494 sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header); 495 if (sc->sc_rcvoffset <= 4) 496 sc->sc_rcvoffset += 4; 497 KASSERT((sc->sc_rcvoffset & 3) == 2); 498 KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET)); 499 KASSERT(sc->sc_rcvoffset >= 6); 500 501 /* 502 * If our frame size has changed (or it's our first time through) 503 * destroy the existing transmit mapcache. 504 */ 505 if (sc->sc_tx_mapcache != NULL 506 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 507 bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache); 508 sc->sc_tx_mapcache = NULL; 509 } 510 511 if (sc->sc_tx_mapcache == NULL) { 512 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache, 513 BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS); 514 if (error) 515 return error; 516 } 517 518 sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE 519 | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED) 520 | RX_ENA | TX_ENA; 521 522 if (ifp->if_flags & IFF_PROMISC) { 523 sc->sc_cmdcfg |= PROMISC_EN; 524 } else { 525 sc->sc_cmdcfg &= ~PROMISC_EN; 526 } 527 528 const uint8_t * const lladdr = CLLADDR(ifp->if_sadl); 529 const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr); 530 531 /* 532 * We make sure that a received Ethernet packet start on a non-word 533 * boundary so that the packet payload will be on a word boundary. 534 * So to check the destination address we keep around two words to 535 * quickly compare with. 536 */ 537 #if __ARMEL__ 538 sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8); 539 sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8) 540 | (lladdr[4] << 16) | (lladdr[5] << 24); 541 #else 542 sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8); 543 sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8) 544 | (lladdr[1] << 16) | (lladdr[2] << 24); 545 #endif 546 547 sc->sc_intmask = DESCPROTOERR | DATAERR | DESCERR; 548 549 /* 5. Load RCVADDR_LO with new pointer */ 550 bcmeth_rxq_reset(sc, &sc->sc_rxq); 551 552 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl, 553 __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET) 554 | RCVCTL_PARITY_DIS 555 | RCVCTL_OFLOW_CONTINUE 556 | __SHIFTIN(3, RCVCTL_BURSTLEN)); 557 558 /* 6. Load XMTADDR_LO with new pointer */ 559 bcmeth_txq_reset(sc, &sc->sc_txq); 560 561 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX 562 | XMTCTL_PARITY_DIS 563 | __SHIFTIN(3, XMTCTL_BURSTLEN)); 564 565 /* 7. Setup other UNIMAC registers */ 566 bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm); 567 bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0)); 568 bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32)); 569 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg); 570 571 uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL); 572 devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE; 573 devctl &= ~FLOW_CTRL_MODE; 574 devctl &= ~MIB_RD_RESET_EN; 575 devctl &= ~RXQ_OVERFLOW_CTRL_SEL; 576 devctl &= ~CPU_FLOW_CTRL_ON; 577 bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl); 578 579 /* Setup lazy receive (at most 1ms). */ 580 const struct cpu_softc * const cpu = curcpu()->ci_softc; 581 sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT) 582 | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT); 583 bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy); 584 585 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */ 586 sc->sc_intmask |= XMTINT_0 | XMTUF; 587 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, 588 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE); 589 590 591 /* 12. Enable receive queues in RQUEUE, */ 592 sc->sc_intmask |= RCVINT | RCVDESCUF | RCVFIFOOF; 593 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl, 594 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE); 595 596 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 597 598 #if 0 599 aprint_normal_dev(sc->sc_dev, 600 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n", 601 devctl, sc->sc_cmdcfg, 602 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl), 603 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl)); 604 #endif 605 606 sc->sc_soft_flags = 0; 607 608 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 609 610 ifp->if_flags |= IFF_RUNNING; 611 612 return error; 613 } 614 615 static void 616 bcmeth_ifstop(struct ifnet *ifp, int disable) 617 { 618 struct bcmeth_softc * const sc = ifp->if_softc; 619 struct bcmeth_txqueue * const txq = &sc->sc_txq; 620 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 621 622 KASSERT(!cpu_intr_p()); 623 624 sc->sc_soft_flags = 0; 625 sc->sc_work_flags = 0; 626 627 /* Disable Rx processing */ 628 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl, 629 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE); 630 631 /* Disable Tx processing */ 632 bcmeth_write_4(sc, txq->txq_reg_xmtctl, 633 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE); 634 635 /* Disable all interrupts */ 636 bcmeth_write_4(sc, GMAC_INTMASK, 0); 637 638 for (;;) { 639 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 640 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 641 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS 642 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS) 643 break; 644 delay(50); 645 } 646 /* 647 * Now reset the controller. 648 * 649 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register 650 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register 651 */ 652 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET); 653 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0); 654 sc->sc_intmask = 0; 655 ifp->if_flags &= ~IFF_RUNNING; 656 657 /* 658 * Let's consume any remaining transmitted packets. And if we are 659 * disabling the interface, purge ourselves of any untransmitted 660 * packets. But don't consume any received packets, just drop them. 661 * If we aren't disabling the interface, save the mbufs in the 662 * receive queue for reuse. 663 */ 664 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable); 665 bcmeth_txq_consume(sc, &sc->sc_txq); 666 if (disable) { 667 bcmeth_txq_purge(sc, &sc->sc_txq); 668 IF_PURGE(&ifp->if_snd); 669 } 670 671 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0); 672 } 673 674 static void 675 bcmeth_ifwatchdog(struct ifnet *ifp) 676 { 677 } 678 679 static int 680 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 681 { 682 const int s = splnet(); 683 int error; 684 685 switch (cmd) { 686 default: 687 error = ether_ioctl(ifp, cmd, data); 688 if (error != ENETRESET) 689 break; 690 691 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 692 error = 0; 693 break; 694 } 695 error = bcmeth_ifinit(ifp); 696 break; 697 } 698 699 splx(s); 700 return error; 701 } 702 703 static void 704 bcmeth_rxq_desc_presync( 705 struct bcmeth_softc *sc, 706 struct bcmeth_rxqueue *rxq, 707 struct gmac_rxdb *rxdb, 708 size_t count) 709 { 710 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 711 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 712 BUS_DMASYNC_PREWRITE); 713 } 714 715 static void 716 bcmeth_rxq_desc_postsync( 717 struct bcmeth_softc *sc, 718 struct bcmeth_rxqueue *rxq, 719 struct gmac_rxdb *rxdb, 720 size_t count) 721 { 722 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 723 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 724 BUS_DMASYNC_POSTWRITE); 725 } 726 727 static void 728 bcmeth_txq_desc_presync( 729 struct bcmeth_softc *sc, 730 struct bcmeth_txqueue *txq, 731 struct gmac_txdb *txdb, 732 size_t count) 733 { 734 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 735 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 736 BUS_DMASYNC_PREWRITE); 737 } 738 739 static void 740 bcmeth_txq_desc_postsync( 741 struct bcmeth_softc *sc, 742 struct bcmeth_txqueue *txq, 743 struct gmac_txdb *txdb, 744 size_t count) 745 { 746 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 747 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 748 BUS_DMASYNC_POSTWRITE); 749 } 750 751 static bus_dmamap_t 752 bcmeth_mapcache_get( 753 struct bcmeth_softc *sc, 754 struct bcmeth_mapcache *dmc) 755 { 756 KASSERT(dmc->dmc_nmaps > 0); 757 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 758 return dmc->dmc_maps[--dmc->dmc_nmaps]; 759 } 760 761 static void 762 bcmeth_mapcache_put( 763 struct bcmeth_softc *sc, 764 struct bcmeth_mapcache *dmc, 765 bus_dmamap_t map) 766 { 767 KASSERT(map != NULL); 768 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 769 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 770 } 771 772 static void 773 bcmeth_mapcache_destroy( 774 struct bcmeth_softc *sc, 775 struct bcmeth_mapcache *dmc) 776 { 777 const size_t dmc_size = 778 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]); 779 780 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 781 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 782 } 783 kmem_intr_free(dmc, dmc_size); 784 } 785 786 static int 787 bcmeth_mapcache_create( 788 struct bcmeth_softc *sc, 789 struct bcmeth_mapcache **dmc_p, 790 size_t maxmaps, 791 size_t maxmapsize, 792 size_t maxseg) 793 { 794 const size_t dmc_size = 795 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]); 796 struct bcmeth_mapcache * const dmc = 797 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 798 799 dmc->dmc_maxmaps = maxmaps; 800 dmc->dmc_nmaps = maxmaps; 801 dmc->dmc_maxmapsize = maxmapsize; 802 dmc->dmc_maxseg = maxseg; 803 804 for (u_int i = 0; i < maxmaps; i++) { 805 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 806 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 807 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 808 if (error) { 809 aprint_error_dev(sc->sc_dev, 810 "failed to creat dma map cache " 811 "entry %u of %zu: %d\n", 812 i, maxmaps, error); 813 while (i-- > 0) { 814 bus_dmamap_destroy(sc->sc_dmat, 815 dmc->dmc_maps[i]); 816 } 817 kmem_intr_free(dmc, dmc_size); 818 return error; 819 } 820 KASSERT(dmc->dmc_maps[i] != NULL); 821 } 822 823 *dmc_p = dmc; 824 825 return 0; 826 } 827 828 #if 0 829 static void 830 bcmeth_dmamem_free( 831 bus_dma_tag_t dmat, 832 size_t map_size, 833 bus_dma_segment_t *seg, 834 bus_dmamap_t map, 835 void *kvap) 836 { 837 bus_dmamap_destroy(dmat, map); 838 bus_dmamem_unmap(dmat, kvap, map_size); 839 bus_dmamem_free(dmat, seg, 1); 840 } 841 #endif 842 843 static int 844 bcmeth_dmamem_alloc( 845 bus_dma_tag_t dmat, 846 size_t map_size, 847 bus_dma_segment_t *seg, 848 bus_dmamap_t *map, 849 void **kvap) 850 { 851 int error; 852 int nseg; 853 854 *kvap = NULL; 855 *map = NULL; 856 857 error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0, 858 seg, 1, &nseg, 0); 859 if (error) 860 return error; 861 862 KASSERT(nseg == 1); 863 864 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0); 865 if (error == 0) { 866 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 867 map); 868 if (error == 0) { 869 error = bus_dmamap_load(dmat, *map, *kvap, map_size, 870 NULL, 0); 871 if (error == 0) 872 return 0; 873 bus_dmamap_destroy(dmat, *map); 874 *map = NULL; 875 } 876 bus_dmamem_unmap(dmat, *kvap, map_size); 877 *kvap = NULL; 878 } 879 bus_dmamem_free(dmat, seg, nseg); 880 return 0; 881 } 882 883 static struct mbuf * 884 bcmeth_rx_buf_alloc( 885 struct bcmeth_softc *sc) 886 { 887 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA); 888 if (m == NULL) { 889 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr"); 890 return NULL; 891 } 892 MCLGET(m, M_DONTWAIT); 893 if ((m->m_flags & M_EXT) == 0) { 894 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 895 m_freem(m); 896 return NULL; 897 } 898 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 899 900 bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache); 901 if (map == NULL) { 902 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 903 m_freem(m); 904 return NULL; 905 } 906 M_SETCTX(m, map); 907 m->m_len = m->m_pkthdr.len = MCLBYTES; 908 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 909 BUS_DMA_READ | BUS_DMA_NOWAIT); 910 if (error) { 911 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n", 912 error); 913 M_SETCTX(m, NULL); 914 m_freem(m); 915 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map); 916 return NULL; 917 } 918 KASSERT(map->dm_mapsize == MCLBYTES); 919 #ifdef BCMETH_RCVMAGIC 920 *mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC); 921 bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t), 922 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 923 bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t), 924 map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD); 925 #else 926 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 927 BUS_DMASYNC_PREREAD); 928 #endif 929 930 return m; 931 } 932 933 static void 934 bcmeth_rx_map_unload( 935 struct bcmeth_softc *sc, 936 struct mbuf *m) 937 { 938 KASSERT(m); 939 for (; m != NULL; m = m->m_next) { 940 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 941 KASSERT(map); 942 KASSERT(map->dm_mapsize == MCLBYTES); 943 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 944 BUS_DMASYNC_POSTREAD); 945 bus_dmamap_unload(sc->sc_dmat, map); 946 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map); 947 M_SETCTX(m, NULL); 948 } 949 } 950 951 static bool 952 bcmeth_rxq_produce( 953 struct bcmeth_softc *sc, 954 struct bcmeth_rxqueue *rxq) 955 { 956 struct gmac_rxdb *producer = rxq->rxq_producer; 957 bool produced = false; 958 959 while (rxq->rxq_inuse < rxq->rxq_threshold) { 960 struct mbuf *m; 961 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 962 if (m == NULL) { 963 m = bcmeth_rx_buf_alloc(sc); 964 if (m == NULL) { 965 printf("%s: bcmeth_rx_buf_alloc failed\n", 966 __func__); 967 break; 968 } 969 } 970 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 971 KASSERT(map); 972 973 producer->rxdb_buflen = htole32(MCLBYTES); 974 producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr); 975 producer->rxdb_flags &= htole32(RXDB_FLAG_ET); 976 *rxq->rxq_mtail = m; 977 rxq->rxq_mtail = &m->m_next; 978 m->m_len = MCLBYTES; 979 m->m_next = NULL; 980 rxq->rxq_inuse++; 981 if (++producer == rxq->rxq_last) { 982 membar_producer(); 983 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 984 rxq->rxq_last - rxq->rxq_producer); 985 producer = rxq->rxq_producer = rxq->rxq_first; 986 } 987 produced = true; 988 } 989 if (produced) { 990 membar_producer(); 991 if (producer != rxq->rxq_producer) { 992 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 993 producer - rxq->rxq_producer); 994 rxq->rxq_producer = producer; 995 } 996 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr, 997 rxq->rxq_descmap->dm_segs[0].ds_addr 998 + ((uintptr_t)producer & RCVPTR)); 999 } 1000 return true; 1001 } 1002 1003 static void 1004 bcmeth_rx_input( 1005 struct bcmeth_softc *sc, 1006 struct mbuf *m, 1007 uint32_t rxdb_flags) 1008 { 1009 struct ifnet * const ifp = &sc->sc_if; 1010 1011 bcmeth_rx_map_unload(sc, m); 1012 1013 m_adj(m, sc->sc_rcvoffset); 1014 1015 /* 1016 * If we are in promiscuous mode and this isn't a multicast, check the 1017 * destination address to make sure it matches our own. If it doesn't, 1018 * mark the packet as being received promiscuously. 1019 */ 1020 if ((sc->sc_cmdcfg & PROMISC_EN) 1021 && (m->m_data[0] & 1) == 0 1022 && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0] 1023 || *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) { 1024 m->m_flags |= M_PROMISC; 1025 } 1026 m_set_rcvif(m, ifp); 1027 1028 /* 1029 * Let's give it to the network subsystm to deal with. 1030 */ 1031 #ifdef BCMETH_MPSAFE 1032 mutex_exit(sc->sc_lock); 1033 if_input(ifp, m); 1034 mutex_enter(sc->sc_lock); 1035 #else 1036 int s = splnet(); 1037 if_input(ifp, m); 1038 splx(s); 1039 #endif 1040 } 1041 1042 static bool 1043 bcmeth_rxq_consume( 1044 struct bcmeth_softc *sc, 1045 struct bcmeth_rxqueue *rxq, 1046 size_t atmost) 1047 { 1048 struct ifnet * const ifp = &sc->sc_if; 1049 struct gmac_rxdb *consumer = rxq->rxq_consumer; 1050 size_t rxconsumed = 0; 1051 bool didconsume = false; 1052 1053 while (atmost-- > 0) { 1054 if (consumer == rxq->rxq_producer) { 1055 KASSERT(rxq->rxq_inuse == 0); 1056 break; 1057 } 1058 1059 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1060 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1061 if (consumer == rxq->rxq_first + currdscr) { 1062 break; 1063 } 1064 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1); 1065 1066 /* 1067 * We own this packet again. Copy the rxsts word from it. 1068 */ 1069 rxconsumed++; 1070 didconsume = true; 1071 uint32_t rxsts; 1072 KASSERT(rxq->rxq_mhead != NULL); 1073 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t); 1074 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align, 1075 BUS_DMASYNC_POSTREAD); 1076 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4); 1077 rxsts = le32toh(rxsts); 1078 #if 0 1079 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd", 1080 currdscr, consumer - rxq->rxq_first); 1081 #endif 1082 1083 /* 1084 * Get the count of descriptors. Fetch the correct number 1085 * of mbufs. 1086 */ 1087 #ifdef BCMETH_RCVMAGIC 1088 size_t desc_count = rxsts != BCMETH_RCVMAGIC 1089 ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1; 1090 #else 1091 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1; 1092 #endif 1093 struct mbuf *m = rxq->rxq_mhead; 1094 struct mbuf *m_last = m; 1095 for (size_t i = 1; i < desc_count; i++) { 1096 if (++consumer == rxq->rxq_last) { 1097 consumer = rxq->rxq_first; 1098 } 1099 KASSERTMSG(consumer != rxq->rxq_first + currdscr, 1100 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u " 1101 "consumer=%zd", i, rxsts, desc_count, currdscr, 1102 consumer - rxq->rxq_first); 1103 m_last = m_last->m_next; 1104 } 1105 1106 /* 1107 * Now remove it/them from the list of enqueued mbufs. 1108 */ 1109 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1110 rxq->rxq_mtail = &rxq->rxq_mhead; 1111 m_last->m_next = NULL; 1112 1113 #ifdef BCMETH_RCVMAGIC 1114 if (rxsts == BCMETH_RCVMAGIC) { 1115 if_statinc(ifp, if_ierrors); 1116 if ((m->m_ext.ext_paddr >> 28) == 8) { 1117 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo); 1118 } else { 1119 BCMETH_EVCNT_INCR( sc->sc_ev_rx_badmagic_hi); 1120 } 1121 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1122 } else 1123 #endif /* BCMETH_RCVMAGIC */ 1124 if (rxsts 1125 & (RXSTS_CRC_ERROR |RXSTS_OVERSIZED |RXSTS_PKT_OVERFLOW)) { 1126 aprint_error_dev(sc->sc_dev, 1127 "[%zu]: count=%zu rxsts=%#x\n", 1128 consumer - rxq->rxq_first, desc_count, rxsts); 1129 /* 1130 * We encountered an error, take the mbufs and add them 1131 * to the rx bufcache so we can quickly reuse them. 1132 */ 1133 if_statinc(ifp, if_ierrors); 1134 do { 1135 struct mbuf *m0 = m->m_next; 1136 m->m_next = NULL; 1137 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1138 m = m0; 1139 } while (m); 1140 } else { 1141 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN); 1142 framelen += sc->sc_rcvoffset; 1143 m->m_pkthdr.len = framelen; 1144 if (desc_count == 1) { 1145 KASSERT(framelen <= MCLBYTES); 1146 m->m_len = framelen; 1147 } else { 1148 m_last->m_len = framelen & (MCLBYTES - 1); 1149 } 1150 1151 #ifdef BCMETH_MPSAFE 1152 /* 1153 * Wrap at the last entry! 1154 */ 1155 if (++consumer == rxq->rxq_last) { 1156 KASSERT(consumer[-1].rxdb_flags 1157 & htole32(RXDB_FLAG_ET)); 1158 rxq->rxq_consumer = rxq->rxq_first; 1159 } else { 1160 rxq->rxq_consumer = consumer; 1161 } 1162 rxq->rxq_inuse -= rxconsumed; 1163 #endif /* BCMETH_MPSAFE */ 1164 1165 /* 1166 * Receive the packet (which releases our lock) 1167 */ 1168 bcmeth_rx_input(sc, m, rxsts); 1169 1170 #ifdef BCMETH_MPSAFE 1171 /* 1172 * Since we had to give up our lock, we need to 1173 * refresh these. 1174 */ 1175 consumer = rxq->rxq_consumer; 1176 rxconsumed = 0; 1177 continue; 1178 #endif /* BCMETH_MPSAFE */ 1179 } 1180 1181 /* 1182 * Wrap at the last entry! 1183 */ 1184 if (++consumer == rxq->rxq_last) { 1185 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET)); 1186 consumer = rxq->rxq_first; 1187 } 1188 } 1189 1190 /* 1191 * Update queue info. 1192 */ 1193 rxq->rxq_consumer = consumer; 1194 rxq->rxq_inuse -= rxconsumed; 1195 1196 /* 1197 * Did we consume anything? 1198 */ 1199 return didconsume; 1200 } 1201 1202 static void 1203 bcmeth_rxq_purge( 1204 struct bcmeth_softc *sc, 1205 struct bcmeth_rxqueue *rxq, 1206 bool discard) 1207 { 1208 struct mbuf *m; 1209 1210 if ((m = rxq->rxq_mhead) != NULL) { 1211 if (discard) { 1212 bcmeth_rx_map_unload(sc, m); 1213 m_freem(m); 1214 } else { 1215 while (m != NULL) { 1216 struct mbuf *m0 = m->m_next; 1217 m->m_next = NULL; 1218 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1219 m = m0; 1220 } 1221 } 1222 } 1223 1224 rxq->rxq_mhead = NULL; 1225 rxq->rxq_mtail = &rxq->rxq_mhead; 1226 rxq->rxq_inuse = 0; 1227 } 1228 1229 static void 1230 bcmeth_rxq_reset( 1231 struct bcmeth_softc *sc, 1232 struct bcmeth_rxqueue *rxq) 1233 { 1234 /* 1235 * sync all the descriptors 1236 */ 1237 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1238 rxq->rxq_last - rxq->rxq_first); 1239 1240 /* 1241 * Make sure we own all descriptors in the ring. 1242 */ 1243 struct gmac_rxdb *rxdb; 1244 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) { 1245 rxdb->rxdb_flags = htole32(RXDB_FLAG_IC); 1246 } 1247 1248 /* 1249 * Last descriptor has the wrap flag. 1250 */ 1251 rxdb->rxdb_flags = htole32(RXDB_FLAG_ET | RXDB_FLAG_IC); 1252 1253 /* 1254 * Reset the producer consumer indexes. 1255 */ 1256 rxq->rxq_consumer = rxq->rxq_first; 1257 rxq->rxq_producer = rxq->rxq_first; 1258 rxq->rxq_inuse = 0; 1259 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS) 1260 rxq->rxq_threshold = BCMETH_MINRXMBUFS; 1261 1262 sc->sc_intmask |= RCVINT | RCVFIFOOF | RCVDESCUF; 1263 1264 /* 1265 * Restart the receiver at the first descriptor 1266 */ 1267 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo, 1268 rxq->rxq_descmap->dm_segs[0].ds_addr); 1269 } 1270 1271 static int 1272 bcmeth_rxq_attach( 1273 struct bcmeth_softc *sc, 1274 struct bcmeth_rxqueue *rxq, 1275 u_int qno) 1276 { 1277 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]); 1278 int error; 1279 void *descs; 1280 1281 KASSERT(desc_count == 256 || desc_count == 512); 1282 1283 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1284 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1285 if (error) 1286 return error; 1287 1288 memset(descs, 0, BCMETH_RINGSIZE); 1289 rxq->rxq_first = descs; 1290 rxq->rxq_last = rxq->rxq_first + desc_count; 1291 rxq->rxq_consumer = descs; 1292 rxq->rxq_producer = descs; 1293 1294 bcmeth_rxq_purge(sc, rxq, true); 1295 bcmeth_rxq_reset(sc, rxq); 1296 1297 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW; 1298 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL; 1299 rxq->rxq_reg_rcvptr = GMAC_RCVPTR; 1300 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0; 1301 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1; 1302 1303 return 0; 1304 } 1305 1306 static bool 1307 bcmeth_txq_active_p( 1308 struct bcmeth_softc * const sc, 1309 struct bcmeth_txqueue *txq) 1310 { 1311 return !IF_IS_EMPTY(&txq->txq_mbufs); 1312 } 1313 1314 static bool 1315 bcmeth_txq_fillable_p( 1316 struct bcmeth_softc * const sc, 1317 struct bcmeth_txqueue *txq) 1318 { 1319 return txq->txq_free >= txq->txq_threshold; 1320 } 1321 1322 static int 1323 bcmeth_txq_attach( 1324 struct bcmeth_softc *sc, 1325 struct bcmeth_txqueue *txq, 1326 u_int qno) 1327 { 1328 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]); 1329 int error; 1330 void *descs; 1331 1332 KASSERT(desc_count == 256 || desc_count == 512); 1333 1334 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1335 &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1336 if (error) 1337 return error; 1338 1339 memset(descs, 0, BCMETH_RINGSIZE); 1340 txq->txq_first = descs; 1341 txq->txq_last = txq->txq_first + desc_count; 1342 txq->txq_consumer = descs; 1343 txq->txq_producer = descs; 1344 1345 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS); 1346 1347 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW; 1348 txq->txq_reg_xmtctl = GMAC_XMTCONTROL; 1349 txq->txq_reg_xmtptr = GMAC_XMTPTR; 1350 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0; 1351 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1; 1352 1353 bcmeth_txq_reset(sc, txq); 1354 1355 return 0; 1356 } 1357 1358 static int 1359 bcmeth_txq_map_load( 1360 struct bcmeth_softc *sc, 1361 struct bcmeth_txqueue *txq, 1362 struct mbuf *m) 1363 { 1364 bus_dmamap_t map; 1365 int error; 1366 1367 map = M_GETCTX(m, bus_dmamap_t); 1368 if (map != NULL) 1369 return 0; 1370 1371 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache); 1372 if (map == NULL) 1373 return ENOMEM; 1374 1375 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1376 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1377 if (error) 1378 return error; 1379 1380 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1381 BUS_DMASYNC_PREWRITE); 1382 M_SETCTX(m, map); 1383 return 0; 1384 } 1385 1386 static void 1387 bcmeth_txq_map_unload( 1388 struct bcmeth_softc *sc, 1389 struct bcmeth_txqueue *txq, 1390 struct mbuf *m) 1391 { 1392 KASSERT(m); 1393 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1394 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1395 BUS_DMASYNC_POSTWRITE); 1396 bus_dmamap_unload(sc->sc_dmat, map); 1397 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map); 1398 } 1399 1400 static bool 1401 bcmeth_txq_produce( 1402 struct bcmeth_softc *sc, 1403 struct bcmeth_txqueue *txq, 1404 struct mbuf *m) 1405 { 1406 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1407 1408 if (map->dm_nsegs > txq->txq_free) 1409 return false; 1410 1411 /* 1412 * TCP Offload flag must be set in the first descriptor. 1413 */ 1414 struct gmac_txdb *producer = txq->txq_producer; 1415 uint32_t first_flags = TXDB_FLAG_SF; 1416 uint32_t last_flags = TXDB_FLAG_EF; 1417 1418 /* 1419 * If we've produced enough descriptors without consuming any 1420 * we need to ask for an interrupt to reclaim some. 1421 */ 1422 txq->txq_lastintr += map->dm_nsegs; 1423 if (txq->txq_lastintr >= txq->txq_threshold 1424 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1425 txq->txq_lastintr = 0; 1426 last_flags |= TXDB_FLAG_IC; 1427 } 1428 1429 KASSERT(producer != txq->txq_last); 1430 1431 struct gmac_txdb *start = producer; 1432 size_t count = map->dm_nsegs; 1433 producer->txdb_flags |= htole32(first_flags); 1434 producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr); 1435 producer->txdb_buflen = htole32(map->dm_segs[0].ds_len); 1436 for (u_int i = 1; i < map->dm_nsegs; i++) { 1437 #if 0 1438 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1439 le32toh(producer->txdb_flags), 1440 le32toh(producer->txdb_buflen), 1441 le32toh(producer->txdb_addrlo), 1442 le32toh(producer->txdb_addrhi)); 1443 #endif 1444 if (__predict_false(++producer == txq->txq_last)) { 1445 bcmeth_txq_desc_presync(sc, txq, start, 1446 txq->txq_last - start); 1447 count -= txq->txq_last - start; 1448 producer = txq->txq_first; 1449 start = txq->txq_first; 1450 } 1451 producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr); 1452 producer->txdb_buflen = htole32(map->dm_segs[i].ds_len); 1453 } 1454 producer->txdb_flags |= htole32(last_flags); 1455 #if 0 1456 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1457 le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen), 1458 le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi)); 1459 #endif 1460 if (count) 1461 bcmeth_txq_desc_presync(sc, txq, start, count); 1462 1463 /* 1464 * Reduce free count by the number of segments we consumed. 1465 */ 1466 txq->txq_free -= map->dm_nsegs; 1467 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1468 KASSERT(map->dm_nsegs == 1 1469 || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0); 1470 KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF)); 1471 1472 #if 0 1473 printf("%s: mbuf %p: produced a %u byte packet in %u segments " 1474 "(%zd..%zd)\n", __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1475 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1476 #endif 1477 1478 if (producer + 1 == txq->txq_last) 1479 txq->txq_producer = txq->txq_first; 1480 else 1481 txq->txq_producer = producer + 1; 1482 IF_ENQUEUE(&txq->txq_mbufs, m); 1483 1484 /* 1485 * Let the transmitter know there's more to do 1486 */ 1487 bcmeth_write_4(sc, txq->txq_reg_xmtptr, 1488 txq->txq_descmap->dm_segs[0].ds_addr 1489 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR)); 1490 1491 return true; 1492 } 1493 1494 static struct mbuf * 1495 bcmeth_copy_packet(struct mbuf *m) 1496 { 1497 struct mbuf *mext = NULL; 1498 size_t misalignment = 0; 1499 size_t hlen = 0; 1500 1501 for (mext = m; mext != NULL; mext = mext->m_next) { 1502 if (mext->m_flags & M_EXT) { 1503 misalignment = mtod(mext, vaddr_t) & arm_dcache_align; 1504 break; 1505 } 1506 hlen += m->m_len; 1507 } 1508 1509 struct mbuf *n = m->m_next; 1510 if (m != mext && hlen + misalignment <= MHLEN && false) { 1511 KASSERT(m->m_pktdat <= m->m_data 1512 && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]); 1513 size_t oldoff = m->m_data - m->m_pktdat; 1514 size_t off; 1515 if (mext == NULL) { 1516 off = (oldoff + hlen > MHLEN) ? 0 : oldoff; 1517 } else { 1518 off = MHLEN - (hlen + misalignment); 1519 } 1520 KASSERT(off + hlen + misalignment <= MHLEN); 1521 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) { 1522 memmove(&m->m_pktdat[off], m->m_data, m->m_len); 1523 m->m_data = &m->m_pktdat[off]; 1524 } 1525 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]); 1526 m->m_len = hlen; 1527 m->m_next = mext; 1528 while (n != mext) { 1529 n = m_free(n); 1530 } 1531 return m; 1532 } 1533 1534 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type); 1535 if (m0 == NULL) { 1536 return NULL; 1537 } 1538 m_copy_pkthdr(m0, m); 1539 MCLAIM(m0, m->m_owner); 1540 if (m0->m_pkthdr.len > MHLEN) { 1541 MCLGET(m0, M_DONTWAIT); 1542 if ((m0->m_flags & M_EXT) == 0) { 1543 m_freem(m0); 1544 return NULL; 1545 } 1546 } 1547 m0->m_len = m->m_pkthdr.len; 1548 m_copydata(m, 0, m0->m_len, mtod(m0, void *)); 1549 m_freem(m); 1550 return m0; 1551 } 1552 1553 static bool 1554 bcmeth_txq_enqueue( 1555 struct bcmeth_softc *sc, 1556 struct bcmeth_txqueue *txq) 1557 { 1558 for (;;) { 1559 if (IF_QFULL(&txq->txq_mbufs)) 1560 return false; 1561 struct mbuf *m = txq->txq_next; 1562 if (m == NULL) { 1563 int s = splnet(); 1564 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1565 splx(s); 1566 if (m == NULL) 1567 return true; 1568 M_SETCTX(m, NULL); 1569 } else { 1570 txq->txq_next = NULL; 1571 } 1572 /* 1573 * If LINK2 is set and this packet uses multiple mbufs, 1574 * consolidate it into a single mbuf. 1575 */ 1576 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) { 1577 struct mbuf *m0 = bcmeth_copy_packet(m); 1578 if (m0 == NULL) { 1579 txq->txq_next = m; 1580 return true; 1581 } 1582 m = m0; 1583 } 1584 int error = bcmeth_txq_map_load(sc, txq, m); 1585 if (error) { 1586 aprint_error_dev(sc->sc_dev, 1587 "discarded packet due to " 1588 "dmamap load failure: %d\n", error); 1589 m_freem(m); 1590 continue; 1591 } 1592 KASSERT(txq->txq_next == NULL); 1593 if (!bcmeth_txq_produce(sc, txq, m)) { 1594 txq->txq_next = m; 1595 return false; 1596 } 1597 KASSERT(txq->txq_next == NULL); 1598 } 1599 } 1600 1601 static bool 1602 bcmeth_txq_consume( 1603 struct bcmeth_softc *sc, 1604 struct bcmeth_txqueue *txq) 1605 { 1606 struct ifnet * const ifp = &sc->sc_if; 1607 struct gmac_txdb *consumer = txq->txq_consumer; 1608 size_t txfree = 0; 1609 1610 #if 0 1611 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 1612 #endif 1613 1614 for (;;) { 1615 if (consumer == txq->txq_producer) { 1616 txq->txq_consumer = consumer; 1617 txq->txq_free += txfree; 1618 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 1619 #if 0 1620 printf("%s: empty: freed %zu descriptors going from " 1621 "%zu to %zu\n", __func__, txfree, 1622 txq->txq_free - txfree, txq->txq_free); 1623 #endif 1624 KASSERT(txq->txq_lastintr == 0); 1625 KASSERT(txq->txq_free 1626 == txq->txq_last - txq->txq_first - 1); 1627 return true; 1628 } 1629 bcmeth_txq_desc_postsync(sc, txq, consumer, 1); 1630 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 1631 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) { 1632 txq->txq_consumer = consumer; 1633 txq->txq_free += txfree; 1634 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 1635 #if 0 1636 printf("%s: freed %zu descriptors\n", 1637 __func__, txfree); 1638 #endif 1639 return bcmeth_txq_fillable_p(sc, txq); 1640 } 1641 1642 /* 1643 * If this is the last descriptor in the chain, get the 1644 * mbuf, free its dmamap, and free the mbuf chain itself. 1645 */ 1646 const uint32_t txdb_flags = le32toh(consumer->txdb_flags); 1647 if (txdb_flags & TXDB_FLAG_EF) { 1648 struct mbuf *m; 1649 1650 IF_DEQUEUE(&txq->txq_mbufs, m); 1651 KASSERT(m); 1652 bcmeth_txq_map_unload(sc, txq, m); 1653 #if 0 1654 printf("%s: mbuf %p: consumed a %u byte packet\n", 1655 __func__, m, m->m_pkthdr.len); 1656 #endif 1657 bpf_mtap(ifp, m, BPF_D_OUT); 1658 if_statinc(ifp, if_opackets); 1659 if_statadd(ifp, if_obytes, m->m_pkthdr.len); 1660 if (m->m_flags & M_MCAST) 1661 if_statinc(ifp, if_omcasts); 1662 m_freem(m); 1663 } 1664 1665 /* 1666 * We own this packet again. Clear all flags except wrap. 1667 */ 1668 txfree++; 1669 1670 /* 1671 * Wrap at the last entry! 1672 */ 1673 if (txdb_flags & TXDB_FLAG_ET) { 1674 consumer->txdb_flags = htole32(TXDB_FLAG_ET); 1675 KASSERT(consumer + 1 == txq->txq_last); 1676 consumer = txq->txq_first; 1677 } else { 1678 consumer->txdb_flags = 0; 1679 consumer++; 1680 KASSERT(consumer < txq->txq_last); 1681 } 1682 } 1683 } 1684 1685 static void 1686 bcmeth_txq_purge( 1687 struct bcmeth_softc *sc, 1688 struct bcmeth_txqueue *txq) 1689 { 1690 struct mbuf *m; 1691 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0); 1692 1693 for (;;) { 1694 IF_DEQUEUE(&txq->txq_mbufs, m); 1695 if (m == NULL) 1696 break; 1697 bcmeth_txq_map_unload(sc, txq, m); 1698 m_freem(m); 1699 } 1700 if ((m = txq->txq_next) != NULL) { 1701 txq->txq_next = NULL; 1702 bcmeth_txq_map_unload(sc, txq, m); 1703 m_freem(m); 1704 } 1705 } 1706 1707 static void 1708 bcmeth_txq_reset( 1709 struct bcmeth_softc *sc, 1710 struct bcmeth_txqueue *txq) 1711 { 1712 /* 1713 * sync all the descriptors 1714 */ 1715 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first, 1716 txq->txq_last - txq->txq_first); 1717 1718 /* 1719 * Make sure we own all descriptors in the ring. 1720 */ 1721 struct gmac_txdb *txdb; 1722 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) { 1723 txdb->txdb_flags = 0; 1724 } 1725 1726 /* 1727 * Last descriptor has the wrap flag. 1728 */ 1729 txdb->txdb_flags = htole32(TXDB_FLAG_ET); 1730 1731 /* 1732 * Reset the producer consumer indexes. 1733 */ 1734 txq->txq_consumer = txq->txq_first; 1735 txq->txq_producer = txq->txq_first; 1736 txq->txq_free = txq->txq_last - txq->txq_first - 1; 1737 txq->txq_threshold = txq->txq_free / 2; 1738 txq->txq_lastintr = 0; 1739 1740 /* 1741 * What do we want to get interrupted on? 1742 */ 1743 sc->sc_intmask |= XMTINT_0 | XMTUF; 1744 1745 /* 1746 * Restart the transmiter at the first descriptor 1747 */ 1748 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo, 1749 txq->txq_descmap->dm_segs->ds_addr); 1750 } 1751 1752 static void 1753 bcmeth_ifstart(struct ifnet *ifp) 1754 { 1755 struct bcmeth_softc * const sc = ifp->if_softc; 1756 1757 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { 1758 return; 1759 } 1760 1761 #ifdef BCMETH_MPSAFETX 1762 if (cpu_intr_p()) { 1763 #endif 1764 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 1765 softint_schedule(sc->sc_soft_ih); 1766 #ifdef BCMETH_MPSAFETX 1767 } else { 1768 /* 1769 * Either we are in a softintr thread already or some other 1770 * thread so just borrow it to do the send and save ourselves 1771 * the overhead of a fast soft int. 1772 */ 1773 bcmeth_soft_txintr(sc); 1774 } 1775 #endif 1776 } 1777 1778 int 1779 bcmeth_intr(void *arg) 1780 { 1781 struct bcmeth_softc * const sc = arg; 1782 uint32_t soft_flags = 0; 1783 uint32_t work_flags = 0; 1784 int rv = 0; 1785 1786 mutex_enter(sc->sc_hwlock); 1787 1788 uint32_t intmask = sc->sc_intmask; 1789 BCMETH_EVCNT_INCR(sc->sc_ev_intr); 1790 1791 for (;;) { 1792 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 1793 intstatus &= intmask; 1794 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */ 1795 if (intstatus == 0) { 1796 break; 1797 } 1798 #if 0 1799 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n", 1800 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK)); 1801 #endif 1802 if (intstatus & RCVINT) { 1803 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1804 intmask &= ~RCVINT; 1805 1806 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1807 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1808 if (descs < rxq->rxq_consumer - rxq->rxq_first) { 1809 /* 1810 * We wrapped at the end so count how far 1811 * we are from the end. 1812 */ 1813 descs += rxq->rxq_last - rxq->rxq_consumer; 1814 } else { 1815 descs -= rxq->rxq_consumer - rxq->rxq_first; 1816 } 1817 /* 1818 * If we "timedout" we can't be hogging so use 1819 * softints. If we exceeded then we might hogging 1820 * so let the workqueue deal with them. 1821 */ 1822 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, 1823 INTRCVLAZY_FRAMECOUNT); 1824 if (descs < framecount 1825 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) { 1826 soft_flags |= SOFT_RXINTR; 1827 } else { 1828 work_flags |= WORK_RXINTR; 1829 } 1830 } 1831 1832 if (intstatus & XMTINT_0) { 1833 intmask &= ~XMTINT_0; 1834 soft_flags |= SOFT_TXINTR; 1835 } 1836 1837 if (intstatus & RCVDESCUF) { 1838 intmask &= ~RCVDESCUF; 1839 work_flags |= WORK_RXUNDERFLOW; 1840 } 1841 1842 intstatus &= intmask; 1843 if (intstatus) { 1844 aprint_error_dev(sc->sc_dev, 1845 "intr: intstatus=%#x\n", intstatus); 1846 aprint_error_dev(sc->sc_dev, 1847 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n", 1848 sc->sc_rxq.rxq_first, 1849 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr, 1850 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr), 1851 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0), 1852 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1)); 1853 aprint_error_dev(sc->sc_dev, 1854 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n", 1855 sc->sc_txq.txq_first, 1856 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr, 1857 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr), 1858 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0), 1859 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1)); 1860 intmask &= ~intstatus; 1861 work_flags |= WORK_REINIT; 1862 break; 1863 } 1864 } 1865 1866 if (intmask != sc->sc_intmask) { 1867 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1868 } 1869 1870 if (work_flags) { 1871 if (sc->sc_work_flags == 0) { 1872 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL); 1873 } 1874 atomic_or_32(&sc->sc_work_flags, work_flags); 1875 rv = 1; 1876 } 1877 1878 if (soft_flags) { 1879 if (sc->sc_soft_flags == 0) { 1880 softint_schedule(sc->sc_soft_ih); 1881 } 1882 atomic_or_32(&sc->sc_soft_flags, soft_flags); 1883 rv = 1; 1884 } 1885 1886 mutex_exit(sc->sc_hwlock); 1887 1888 return rv; 1889 } 1890 1891 #ifdef BCMETH_MPSAFETX 1892 void 1893 bcmeth_soft_txintr(struct bcmeth_softc *sc) 1894 { 1895 mutex_enter(sc->sc_lock); 1896 /* 1897 * Let's do what we came here for. Consume transmitted 1898 * packets off the transmit ring. 1899 */ 1900 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1901 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1902 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1903 } 1904 if (sc->sc_if.if_flags & IFF_RUNNING) { 1905 mutex_spin_enter(sc->sc_hwlock); 1906 sc->sc_intmask |= XMTINT_0; 1907 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1908 mutex_spin_exit(sc->sc_hwlock); 1909 } 1910 mutex_exit(sc->sc_lock); 1911 } 1912 #endif /* BCMETH_MPSAFETX */ 1913 1914 void 1915 bcmeth_soft_intr(void *arg) 1916 { 1917 struct bcmeth_softc * const sc = arg; 1918 struct ifnet * const ifp = &sc->sc_if; 1919 uint32_t intmask = 0; 1920 1921 mutex_enter(sc->sc_lock); 1922 1923 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 1924 1925 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr); 1926 1927 if ((soft_flags & SOFT_TXINTR) 1928 || bcmeth_txq_active_p(sc, &sc->sc_txq)) { 1929 /* 1930 * Let's do what we came here for. Consume transmitted 1931 * packets off the transmit ring. 1932 */ 1933 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1934 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1935 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1936 } 1937 intmask |= XMTINT_0; 1938 } 1939 1940 if (soft_flags & SOFT_RXINTR) { 1941 /* 1942 * Let's consume 1943 */ 1944 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 1945 sc->sc_rxq.rxq_threshold / 4)) { 1946 /* 1947 * We've consumed a quarter of the ring and still have 1948 * more to do. Refill the ring. 1949 */ 1950 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1951 } 1952 intmask |= RCVINT; 1953 } 1954 1955 if (ifp->if_flags & IFF_RUNNING) { 1956 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1957 mutex_spin_enter(sc->sc_hwlock); 1958 sc->sc_intmask |= intmask; 1959 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1960 mutex_spin_exit(sc->sc_hwlock); 1961 } 1962 1963 mutex_exit(sc->sc_lock); 1964 } 1965 1966 void 1967 bcmeth_worker(struct work *wk, void *arg) 1968 { 1969 struct bcmeth_softc * const sc = arg; 1970 struct ifnet * const ifp = &sc->sc_if; 1971 uint32_t intmask = 0; 1972 1973 mutex_enter(sc->sc_lock); 1974 1975 BCMETH_EVCNT_INCR(sc->sc_ev_work); 1976 1977 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0); 1978 if (work_flags & WORK_REINIT) { 1979 int s = splnet(); 1980 sc->sc_soft_flags = 0; 1981 bcmeth_ifinit(ifp); 1982 splx(s); 1983 work_flags &= ~WORK_RXUNDERFLOW; 1984 } 1985 1986 if (work_flags & WORK_RXUNDERFLOW) { 1987 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1988 size_t threshold = 5 * rxq->rxq_threshold / 4; 1989 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 1990 threshold = rxq->rxq_last - rxq->rxq_first - 1; 1991 } else { 1992 intmask |= RCVDESCUF; 1993 } 1994 aprint_normal_dev(sc->sc_dev, 1995 "increasing receive buffers from %zu to %zu\n", 1996 rxq->rxq_threshold, threshold); 1997 rxq->rxq_threshold = threshold; 1998 } 1999 2000 if (work_flags & WORK_RXINTR) { 2001 /* 2002 * Let's consume 2003 */ 2004 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 2005 sc->sc_rxq.rxq_threshold / 4)) { 2006 /* 2007 * We've consumed a quarter of the ring and still have 2008 * more to do. Refill the ring. 2009 */ 2010 bcmeth_rxq_produce(sc, &sc->sc_rxq); 2011 } 2012 intmask |= RCVINT; 2013 } 2014 2015 if (ifp->if_flags & IFF_RUNNING) { 2016 bcmeth_rxq_produce(sc, &sc->sc_rxq); 2017 #if 0 2018 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 2019 if (intstatus & RCVINT) { 2020 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT); 2021 work_flags |= WORK_RXINTR; 2022 continue; 2023 } 2024 #endif 2025 mutex_spin_enter(sc->sc_hwlock); 2026 sc->sc_intmask |= intmask; 2027 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 2028 mutex_spin_exit(sc->sc_hwlock); 2029 } 2030 2031 mutex_exit(sc->sc_lock); 2032 } 2033