1 /*- 2 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas of 3am Software Foundry. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #define _ARM32_BUS_DMA_PRIVATE 31 #define GMAC_PRIVATE 32 33 #include "locators.h" 34 #include "opt_broadcom.h" 35 36 #include <sys/cdefs.h> 37 38 __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.29 2016/12/15 09:28:02 ozaki-r Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/atomic.h> 42 #include <sys/bus.h> 43 #include <sys/device.h> 44 #include <sys/ioctl.h> 45 #include <sys/intr.h> 46 #include <sys/kmem.h> 47 #include <sys/mutex.h> 48 #include <sys/socket.h> 49 #include <sys/systm.h> 50 #include <sys/workqueue.h> 51 52 #include <net/if.h> 53 #include <net/if_ether.h> 54 #include <net/if_media.h> 55 56 #include <net/if_dl.h> 57 58 #include <net/bpf.h> 59 60 #include <dev/mii/miivar.h> 61 62 #include <arm/locore.h> 63 64 #include <arm/broadcom/bcm53xx_reg.h> 65 #include <arm/broadcom/bcm53xx_var.h> 66 67 //#define BCMETH_MPSAFE 68 69 #ifdef BCMETH_COUNTERS 70 #define BCMETH_EVCNT_ADD(a,b) ((void)((a).ev_count += (b))) 71 #else 72 #define BCMETH_EVCNT_ADD(a,b) do { } while (/*CONSTCOND*/0) 73 #endif 74 #define BCMETH_EVCNT_INCR(a) BCMETH_EVCNT_ADD((a), 1) 75 76 #define BCMETH_MAXTXMBUFS 128 77 #define BCMETH_NTXSEGS 30 78 #define BCMETH_MAXRXMBUFS 255 79 #define BCMETH_MINRXMBUFS 64 80 #define BCMETH_NRXSEGS 1 81 #define BCMETH_RINGSIZE PAGE_SIZE 82 83 #if 1 84 #define BCMETH_RCVMAGIC 0xfeedface 85 #endif 86 87 static int bcmeth_ccb_match(device_t, cfdata_t, void *); 88 static void bcmeth_ccb_attach(device_t, device_t, void *); 89 90 struct bcmeth_txqueue { 91 bus_dmamap_t txq_descmap; 92 struct gmac_txdb *txq_consumer; 93 struct gmac_txdb *txq_producer; 94 struct gmac_txdb *txq_first; 95 struct gmac_txdb *txq_last; 96 struct ifqueue txq_mbufs; 97 struct mbuf *txq_next; 98 size_t txq_free; 99 size_t txq_threshold; 100 size_t txq_lastintr; 101 bus_size_t txq_reg_xmtaddrlo; 102 bus_size_t txq_reg_xmtptr; 103 bus_size_t txq_reg_xmtctl; 104 bus_size_t txq_reg_xmtsts0; 105 bus_size_t txq_reg_xmtsts1; 106 bus_dma_segment_t txq_descmap_seg; 107 }; 108 109 struct 
bcmeth_rxqueue { 110 bus_dmamap_t rxq_descmap; 111 struct gmac_rxdb *rxq_consumer; 112 struct gmac_rxdb *rxq_producer; 113 struct gmac_rxdb *rxq_first; 114 struct gmac_rxdb *rxq_last; 115 struct mbuf *rxq_mhead; 116 struct mbuf **rxq_mtail; 117 struct mbuf *rxq_mconsumer; 118 size_t rxq_inuse; 119 size_t rxq_threshold; 120 bus_size_t rxq_reg_rcvaddrlo; 121 bus_size_t rxq_reg_rcvptr; 122 bus_size_t rxq_reg_rcvctl; 123 bus_size_t rxq_reg_rcvsts0; 124 bus_size_t rxq_reg_rcvsts1; 125 bus_dma_segment_t rxq_descmap_seg; 126 }; 127 128 struct bcmeth_mapcache { 129 u_int dmc_nmaps; 130 u_int dmc_maxseg; 131 u_int dmc_maxmaps; 132 u_int dmc_maxmapsize; 133 bus_dmamap_t dmc_maps[0]; 134 }; 135 136 struct bcmeth_softc { 137 device_t sc_dev; 138 bus_space_tag_t sc_bst; 139 bus_space_handle_t sc_bsh; 140 bus_dma_tag_t sc_dmat; 141 kmutex_t *sc_lock; 142 kmutex_t *sc_hwlock; 143 struct ethercom sc_ec; 144 #define sc_if sc_ec.ec_if 145 struct ifmedia sc_media; 146 void *sc_soft_ih; 147 void *sc_ih; 148 149 struct bcmeth_rxqueue sc_rxq; 150 struct bcmeth_txqueue sc_txq; 151 152 size_t sc_rcvoffset; 153 uint32_t sc_macaddr[2]; 154 uint32_t sc_maxfrm; 155 uint32_t sc_cmdcfg; 156 uint32_t sc_intmask; 157 uint32_t sc_rcvlazy; 158 volatile uint32_t sc_soft_flags; 159 #define SOFT_RXINTR 0x01 160 #define SOFT_TXINTR 0x02 161 162 #ifdef BCMETH_COUNTERS 163 struct evcnt sc_ev_intr; 164 struct evcnt sc_ev_soft_intr; 165 struct evcnt sc_ev_work; 166 struct evcnt sc_ev_tx_stall; 167 struct evcnt sc_ev_rx_badmagic_lo; 168 struct evcnt sc_ev_rx_badmagic_hi; 169 #endif 170 171 struct ifqueue sc_rx_bufcache; 172 struct bcmeth_mapcache *sc_rx_mapcache; 173 struct bcmeth_mapcache *sc_tx_mapcache; 174 175 struct workqueue *sc_workq; 176 struct work sc_work; 177 178 volatile uint32_t sc_work_flags; 179 #define WORK_RXINTR 0x01 180 #define WORK_RXUNDERFLOW 0x02 181 #define WORK_REINIT 0x04 182 183 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 184 }; 185 186 static void bcmeth_ifstart(struct ifnet *); 187 static void bcmeth_ifwatchdog(struct ifnet *); 188 static int bcmeth_ifinit(struct ifnet *); 189 static void bcmeth_ifstop(struct ifnet *, int); 190 static int bcmeth_ifioctl(struct ifnet *, u_long, void *); 191 192 static int bcmeth_mapcache_create(struct bcmeth_softc *, 193 struct bcmeth_mapcache **, size_t, size_t, size_t); 194 static void bcmeth_mapcache_destroy(struct bcmeth_softc *, 195 struct bcmeth_mapcache *); 196 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *, 197 struct bcmeth_mapcache *); 198 static void bcmeth_mapcache_put(struct bcmeth_softc *, 199 struct bcmeth_mapcache *, bus_dmamap_t); 200 201 static int bcmeth_txq_attach(struct bcmeth_softc *, 202 struct bcmeth_txqueue *, u_int); 203 static void bcmeth_txq_purge(struct bcmeth_softc *, 204 struct bcmeth_txqueue *); 205 static void bcmeth_txq_reset(struct bcmeth_softc *, 206 struct bcmeth_txqueue *); 207 static bool bcmeth_txq_consume(struct bcmeth_softc *, 208 struct bcmeth_txqueue *); 209 static bool bcmeth_txq_produce(struct bcmeth_softc *, 210 struct bcmeth_txqueue *, struct mbuf *m); 211 static bool bcmeth_txq_active_p(struct bcmeth_softc *, 212 struct bcmeth_txqueue *); 213 214 static int bcmeth_rxq_attach(struct bcmeth_softc *, 215 struct bcmeth_rxqueue *, u_int); 216 static bool bcmeth_rxq_produce(struct bcmeth_softc *, 217 struct bcmeth_rxqueue *); 218 static void bcmeth_rxq_purge(struct bcmeth_softc *, 219 struct bcmeth_rxqueue *, bool); 220 static void bcmeth_rxq_reset(struct bcmeth_softc *, 221 struct bcmeth_rxqueue *); 222 223 static int 
bcmeth_intr(void *); 224 #ifdef BCMETH_MPSAFETX 225 static void bcmeth_soft_txintr(struct bcmeth_softc *); 226 #endif 227 static void bcmeth_soft_intr(void *); 228 static void bcmeth_worker(struct work *, void *); 229 230 static int bcmeth_mediachange(struct ifnet *); 231 static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *); 232 233 static inline uint32_t 234 bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o) 235 { 236 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o); 237 } 238 239 static inline void 240 bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v) 241 { 242 bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v); 243 } 244 245 CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc), 246 bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL); 247 248 static int 249 bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux) 250 { 251 struct bcmccb_attach_args * const ccbaa = aux; 252 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc; 253 254 if (strcmp(cf->cf_name, loc->loc_name)) 255 return 0; 256 257 #ifdef DIAGNOSTIC 258 const int port = cf->cf_loc[BCMCCBCF_PORT]; 259 #endif 260 KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port); 261 262 return 1; 263 } 264 265 static void 266 bcmeth_ccb_attach(device_t parent, device_t self, void *aux) 267 { 268 struct bcmeth_softc * const sc = device_private(self); 269 struct ethercom * const ec = &sc->sc_ec; 270 struct ifnet * const ifp = &ec->ec_if; 271 struct bcmccb_attach_args * const ccbaa = aux; 272 const struct bcm_locators * const loc = &ccbaa->ccbaa_loc; 273 const char * const xname = device_xname(self); 274 prop_dictionary_t dict = device_properties(self); 275 int error; 276 277 sc->sc_bst = ccbaa->ccbaa_ccb_bst; 278 sc->sc_dmat = ccbaa->ccbaa_dmat; 279 bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh, 280 loc->loc_offset, loc->loc_size, &sc->sc_bsh); 281 282 /* 283 * We need to use the coherent dma tag for the GMAC. 
284 */ 285 sc->sc_dmat = &bcm53xx_coherent_dma_tag; 286 #if _ARM32_NEED_BUS_DMA_BOUNCE 287 if (device_cfdata(self)->cf_flags & 2) { 288 sc->sc_dmat = &bcm53xx_bounce_dma_tag; 289 } 290 #endif 291 292 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address"); 293 if (eaprop == NULL) { 294 uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0); 295 uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1); 296 if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) { 297 aprint_error(": mac-address property is missing\n"); 298 return; 299 } 300 sc->sc_enaddr[0] = (mac0 >> 0) & 0xff; 301 sc->sc_enaddr[1] = (mac0 >> 8) & 0xff; 302 sc->sc_enaddr[2] = (mac0 >> 16) & 0xff; 303 sc->sc_enaddr[3] = (mac0 >> 24) & 0xff; 304 sc->sc_enaddr[4] = (mac1 >> 0) & 0xff; 305 sc->sc_enaddr[5] = (mac1 >> 8) & 0xff; 306 } else { 307 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA); 308 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN); 309 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop), 310 ETHER_ADDR_LEN); 311 } 312 sc->sc_dev = self; 313 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 314 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 315 316 bcmeth_write_4(sc, GMAC_INTMASK, 0); // disable interrupts 317 318 aprint_naive("\n"); 319 aprint_normal(": Gigabit Ethernet Controller\n"); 320 321 error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0); 322 if (error) { 323 aprint_error(": failed to init rxq: %d\n", error); 324 return; 325 } 326 327 error = bcmeth_txq_attach(sc, &sc->sc_txq, 0); 328 if (error) { 329 aprint_error(": failed to init txq: %d\n", error); 330 return; 331 } 332 333 error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache, 334 BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS); 335 if (error) { 336 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 337 return; 338 } 339 340 error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache, 341 BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS); 342 if (error) { 343 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 344 return; 345 } 346 347 error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc, 348 (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU); 349 if (error) { 350 aprint_error(": failed to create workqueue: %d\n", error); 351 return; 352 } 353 354 sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET, 355 bcmeth_soft_intr, sc); 356 357 sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL, 358 bcmeth_intr, sc); 359 360 if (sc->sc_ih == NULL) { 361 aprint_error_dev(self, "failed to establish interrupt %d\n", 362 loc->loc_intrs[0]); 363 } else { 364 aprint_normal_dev(self, "interrupting on irq %d\n", 365 loc->loc_intrs[0]); 366 } 367 368 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 369 ether_sprintf(sc->sc_enaddr)); 370 371 /* 372 * Since each port in plugged into the switch/flow-accelerator, 373 * we hard code at Gige Full-Duplex with Flow Control enabled. 
374 */ 375 int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX; 376 //ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE; 377 ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange, 378 bcmeth_mediastatus); 379 ifmedia_add(&sc->sc_media, ifmedia, 0, NULL); 380 ifmedia_set(&sc->sc_media, ifmedia); 381 382 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; 383 384 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 385 ifp->if_softc = sc; 386 ifp->if_baudrate = IF_Mbps(1000); 387 ifp->if_capabilities = 0; 388 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 389 #ifdef BCMETH_MPSAFE 390 ifp->if_flags2 = IFF2_MPSAFE; 391 #endif 392 ifp->if_ioctl = bcmeth_ifioctl; 393 ifp->if_start = bcmeth_ifstart; 394 ifp->if_watchdog = bcmeth_ifwatchdog; 395 ifp->if_init = bcmeth_ifinit; 396 ifp->if_stop = bcmeth_ifstop; 397 IFQ_SET_READY(&ifp->if_snd); 398 399 bcmeth_ifstop(ifp, true); 400 401 /* 402 * Attach the interface. 403 */ 404 if_initialize(ifp); 405 ether_ifattach(ifp, sc->sc_enaddr); 406 if_register(ifp); 407 408 #ifdef BCMETH_COUNTERS 409 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 410 NULL, xname, "intr"); 411 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 412 NULL, xname, "soft intr"); 413 evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC, 414 NULL, xname, "work items"); 415 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 416 NULL, xname, "tx stalls"); 417 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC, 418 NULL, xname, "rx badmagic lo"); 419 evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC, 420 NULL, xname, "rx badmagic hi"); 421 #endif 422 } 423 424 static int 425 bcmeth_mediachange(struct ifnet *ifp) 426 { 427 //struct bcmeth_softc * const sc = ifp->if_softc; 428 return 0; 429 } 430 431 static void 432 bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm) 433 { 434 //struct bcmeth_softc * const sc = ifp->if_softc; 435 436 ifm->ifm_status = IFM_AVALID | IFM_ACTIVE; 437 ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T; 438 } 439 440 static uint64_t 441 bcmeth_macaddr_create(const uint8_t *enaddr) 442 { 443 return (enaddr[3] << 0) // UNIMAC_MAC_0 444 | (enaddr[2] << 8) // UNIMAC_MAC_0 445 | (enaddr[1] << 16) // UNIMAC_MAC_0 446 | ((uint64_t)enaddr[0] << 24) // UNIMAC_MAC_0 447 | ((uint64_t)enaddr[5] << 32) // UNIMAC_MAC_1 448 | ((uint64_t)enaddr[4] << 40); // UNIMAC_MAC_1 449 } 450 451 static int 452 bcmeth_ifinit(struct ifnet *ifp) 453 { 454 struct bcmeth_softc * const sc = ifp->if_softc; 455 int error = 0; 456 457 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES); 458 if (ifp->if_mtu > ETHERMTU_JUMBO) 459 return error; 460 461 KASSERT(ifp->if_flags & IFF_UP); 462 463 /* 464 * Stop the interface 465 */ 466 bcmeth_ifstop(ifp, 0); 467 468 /* 469 * Reserve enough space at the front so that we can insert a maxsized 470 * link header and a VLAN tag. Also make sure we have enough room for 471 * the rcvsts field as well. 472 */ 473 KASSERT(ALIGN(max_linkhdr) == max_linkhdr); 474 KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu", 475 max_linkhdr, sizeof(struct ether_header)); 476 sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header); 477 if (sc->sc_rcvoffset <= 4) 478 sc->sc_rcvoffset += 4; 479 KASSERT((sc->sc_rcvoffset & 3) == 2); 480 KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET)); 481 KASSERT(sc->sc_rcvoffset >= 6); 482 483 /* 484 * If our frame size has changed (or it's our first time through) 485 * destroy the existing transmit mapcache. 
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
		    BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
	    | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
	    | RX_ENA | TX_ENA;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_cmdcfg |= PROMISC_EN;
	} else {
		sc->sc_cmdcfg &= ~PROMISC_EN;
	}

	const uint8_t * const lladdr = CLLADDR(ifp->if_sadl);
	const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr);

	/*
	 * We make sure that a received Ethernet packet starts on a non-word
	 * boundary so that the packet payload will be on a word boundary.
	 * So to check the destination address we keep around two words to
	 * quickly compare with.
	 */
#if __ARMEL__
	sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8);
	sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8)
	    | (lladdr[4] << 16) | (lladdr[5] << 24);
#else
	sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8);
	sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8)
	    | (lladdr[3] << 16) | (lladdr[2] << 24);
#endif

	sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;

	/* 5. Load RCVADDR_LO with new pointer */
	bcmeth_rxq_reset(sc, &sc->sc_rxq);

	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
	    | RCVCTL_PARITY_DIS
	    | RCVCTL_OFLOW_CONTINUE
	    | __SHIFTIN(3, RCVCTL_BURSTLEN));

	/* 6. Load XMTADDR_LO with new pointer */
	bcmeth_txq_reset(sc, &sc->sc_txq);

	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
	    | XMTCTL_PARITY_DIS
	    | __SHIFTIN(3, XMTCTL_BURSTLEN));

	/* 7. Setup other UNIMAC registers */
	bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
	bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
	bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);

	uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
	devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
	devctl &= ~FLOW_CTRL_MODE;
	devctl &= ~MIB_RD_RESET_EN;
	devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
	devctl &= ~CPU_FLOW_CTRL_ON;
	bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);

	/* Setup lazy receive (at most 1ms). */
	const struct cpu_softc * const cpu = curcpu()->ci_softc;
	sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
	    | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT);
	bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	sc->sc_intmask |= XMTINT_0|XMTUF;
	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);

	/* 12.
Enable receive queues in RQUEUE, */ 574 sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF; 575 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl, 576 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE); 577 578 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 579 580 #if 0 581 aprint_normal_dev(sc->sc_dev, 582 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n", 583 devctl, sc->sc_cmdcfg, 584 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl), 585 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl)); 586 #endif 587 588 sc->sc_soft_flags = 0; 589 590 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 591 592 ifp->if_flags |= IFF_RUNNING; 593 594 return error; 595 } 596 597 static void 598 bcmeth_ifstop(struct ifnet *ifp, int disable) 599 { 600 struct bcmeth_softc * const sc = ifp->if_softc; 601 struct bcmeth_txqueue * const txq = &sc->sc_txq; 602 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 603 604 KASSERT(!cpu_intr_p()); 605 606 sc->sc_soft_flags = 0; 607 sc->sc_work_flags = 0; 608 609 /* Disable Rx processing */ 610 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl, 611 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE); 612 613 /* Disable Tx processing */ 614 bcmeth_write_4(sc, txq->txq_reg_xmtctl, 615 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE); 616 617 /* Disable all interrupts */ 618 bcmeth_write_4(sc, GMAC_INTMASK, 0); 619 620 for (;;) { 621 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 622 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 623 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS 624 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS) 625 break; 626 delay(50); 627 } 628 /* 629 * Now reset the controller. 630 * 631 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register 632 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register 633 */ 634 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET); 635 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0); 636 sc->sc_intmask = 0; 637 ifp->if_flags &= ~IFF_RUNNING; 638 639 /* 640 * Let's consume any remaining transmitted packets. And if we are 641 * disabling the interface, purge ourselves of any untransmitted 642 * packets. But don't consume any received packets, just drop them. 643 * If we aren't disabling the interface, save the mbufs in the 644 * receive queue for reuse. 
645 */ 646 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable); 647 bcmeth_txq_consume(sc, &sc->sc_txq); 648 if (disable) { 649 bcmeth_txq_purge(sc, &sc->sc_txq); 650 IF_PURGE(&ifp->if_snd); 651 } 652 653 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0); 654 } 655 656 static void 657 bcmeth_ifwatchdog(struct ifnet *ifp) 658 { 659 } 660 661 static int 662 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 663 { 664 struct bcmeth_softc *sc = ifp->if_softc; 665 struct ifreq * const ifr = data; 666 const int s = splnet(); 667 int error; 668 669 switch (cmd) { 670 case SIOCSIFMEDIA: 671 case SIOCGIFMEDIA: 672 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 673 break; 674 675 default: 676 error = ether_ioctl(ifp, cmd, data); 677 if (error != ENETRESET) 678 break; 679 680 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 681 error = 0; 682 break; 683 } 684 error = bcmeth_ifinit(ifp); 685 break; 686 } 687 688 splx(s); 689 return error; 690 } 691 692 static void 693 bcmeth_rxq_desc_presync( 694 struct bcmeth_softc *sc, 695 struct bcmeth_rxqueue *rxq, 696 struct gmac_rxdb *rxdb, 697 size_t count) 698 { 699 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 700 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 701 BUS_DMASYNC_PREWRITE); 702 } 703 704 static void 705 bcmeth_rxq_desc_postsync( 706 struct bcmeth_softc *sc, 707 struct bcmeth_rxqueue *rxq, 708 struct gmac_rxdb *rxdb, 709 size_t count) 710 { 711 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 712 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 713 BUS_DMASYNC_POSTWRITE); 714 } 715 716 static void 717 bcmeth_txq_desc_presync( 718 struct bcmeth_softc *sc, 719 struct bcmeth_txqueue *txq, 720 struct gmac_txdb *txdb, 721 size_t count) 722 { 723 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 724 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 725 BUS_DMASYNC_PREWRITE); 726 } 727 728 static void 729 bcmeth_txq_desc_postsync( 730 struct bcmeth_softc *sc, 731 struct bcmeth_txqueue *txq, 732 struct gmac_txdb *txdb, 733 size_t count) 734 { 735 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 736 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 737 BUS_DMASYNC_POSTWRITE); 738 } 739 740 static bus_dmamap_t 741 bcmeth_mapcache_get( 742 struct bcmeth_softc *sc, 743 struct bcmeth_mapcache *dmc) 744 { 745 KASSERT(dmc->dmc_nmaps > 0); 746 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 747 return dmc->dmc_maps[--dmc->dmc_nmaps]; 748 } 749 750 static void 751 bcmeth_mapcache_put( 752 struct bcmeth_softc *sc, 753 struct bcmeth_mapcache *dmc, 754 bus_dmamap_t map) 755 { 756 KASSERT(map != NULL); 757 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 758 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 759 } 760 761 static void 762 bcmeth_mapcache_destroy( 763 struct bcmeth_softc *sc, 764 struct bcmeth_mapcache *dmc) 765 { 766 const size_t dmc_size = 767 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]); 768 769 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 770 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 771 } 772 kmem_intr_free(dmc, dmc_size); 773 } 774 775 static int 776 bcmeth_mapcache_create( 777 struct bcmeth_softc *sc, 778 struct bcmeth_mapcache **dmc_p, 779 size_t maxmaps, 780 size_t maxmapsize, 781 size_t maxseg) 782 { 783 const size_t dmc_size = 784 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]); 785 struct bcmeth_mapcache * const dmc = 786 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 787 788 dmc->dmc_maxmaps = maxmaps; 789 dmc->dmc_nmaps = maxmaps; 790 dmc->dmc_maxmapsize = maxmapsize; 791 
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
bcmeth_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
bcmeth_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
bcmeth_rx_buf_alloc(
	struct bcmeth_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
#ifdef BCMETH_RCVMAGIC
	*mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC);
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#else
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#endif

	return m;
}

static void
bcmeth_rx_map_unload(
	struct bcmeth_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
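		/*
		 * Each receive buffer's dmamap was stashed in the mbuf
		 * packet-header context by bcmeth_rx_buf_alloc; finish the
		 * DMA read, unload the map, and return it to the rx
		 * mapcache so the mbuf can be freed or reused.
		 */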
bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 933 BUS_DMASYNC_POSTREAD); 934 bus_dmamap_unload(sc->sc_dmat, map); 935 bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map); 936 M_SETCTX(m, NULL); 937 } 938 } 939 940 static bool 941 bcmeth_rxq_produce( 942 struct bcmeth_softc *sc, 943 struct bcmeth_rxqueue *rxq) 944 { 945 struct gmac_rxdb *producer = rxq->rxq_producer; 946 bool produced = false; 947 948 while (rxq->rxq_inuse < rxq->rxq_threshold) { 949 struct mbuf *m; 950 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 951 if (m == NULL) { 952 m = bcmeth_rx_buf_alloc(sc); 953 if (m == NULL) { 954 printf("%s: bcmeth_rx_buf_alloc failed\n", __func__); 955 break; 956 } 957 } 958 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 959 KASSERT(map); 960 961 producer->rxdb_buflen = htole32(MCLBYTES); 962 producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr); 963 producer->rxdb_flags &= htole32(RXDB_FLAG_ET); 964 *rxq->rxq_mtail = m; 965 rxq->rxq_mtail = &m->m_next; 966 m->m_len = MCLBYTES; 967 m->m_next = NULL; 968 rxq->rxq_inuse++; 969 if (++producer == rxq->rxq_last) { 970 membar_producer(); 971 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 972 rxq->rxq_last - rxq->rxq_producer); 973 producer = rxq->rxq_producer = rxq->rxq_first; 974 } 975 produced = true; 976 } 977 if (produced) { 978 membar_producer(); 979 if (producer != rxq->rxq_producer) { 980 bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 981 producer - rxq->rxq_producer); 982 rxq->rxq_producer = producer; 983 } 984 bcmeth_write_4(sc, rxq->rxq_reg_rcvptr, 985 rxq->rxq_descmap->dm_segs[0].ds_addr 986 + ((uintptr_t)producer & RCVPTR)); 987 } 988 return true; 989 } 990 991 static void 992 bcmeth_rx_input( 993 struct bcmeth_softc *sc, 994 struct mbuf *m, 995 uint32_t rxdb_flags) 996 { 997 struct ifnet * const ifp = &sc->sc_if; 998 999 bcmeth_rx_map_unload(sc, m); 1000 1001 m_adj(m, sc->sc_rcvoffset); 1002 1003 /* 1004 * If we are in promiscuous mode and this isn't a multicast, check the 1005 * destination address to make sure it matches our own. If it doesn't, 1006 * mark the packet as being received promiscuously. 1007 */ 1008 if ((sc->sc_cmdcfg & PROMISC_EN) 1009 && (m->m_data[0] & 1) == 0 1010 && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0] 1011 || *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) { 1012 m->m_flags |= M_PROMISC; 1013 } 1014 m_set_rcvif(m, ifp); 1015 1016 ifp->if_ibytes += m->m_pkthdr.len; 1017 1018 /* 1019 * Let's give it to the network subsystm to deal with. 1020 */ 1021 #ifdef BCMETH_MPSAFE 1022 mutex_exit(sc->sc_lock); 1023 if_input(ifp, m); 1024 mutex_enter(sc->sc_lock); 1025 #else 1026 int s = splnet(); 1027 if_input(ifp, m); 1028 splx(s); 1029 #endif 1030 } 1031 1032 static bool 1033 bcmeth_rxq_consume( 1034 struct bcmeth_softc *sc, 1035 struct bcmeth_rxqueue *rxq, 1036 size_t atmost) 1037 { 1038 struct ifnet * const ifp = &sc->sc_if; 1039 struct gmac_rxdb *consumer = rxq->rxq_consumer; 1040 size_t rxconsumed = 0; 1041 bool didconsume = false; 1042 1043 while (atmost-- > 0) { 1044 if (consumer == rxq->rxq_producer) { 1045 KASSERT(rxq->rxq_inuse == 0); 1046 break; 1047 } 1048 1049 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1050 uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1051 if (consumer == rxq->rxq_first + currdscr) { 1052 break; 1053 } 1054 bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1); 1055 1056 /* 1057 * We own this packet again. Copy the rxsts word from it. 
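		 * (The status word lives in the first 4 bytes of the receive
		 * buffer; bcmeth_rx_buf_alloc primes that word with
		 * BCMETH_RCVMAGIC, so a buffer that still holds the magic
		 * value is treated below as having no valid status and is
		 * recycled.)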
1058 */ 1059 rxconsumed++; 1060 didconsume = true; 1061 uint32_t rxsts; 1062 KASSERT(rxq->rxq_mhead != NULL); 1063 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t); 1064 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align, 1065 BUS_DMASYNC_POSTREAD); 1066 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4); 1067 rxsts = le32toh(rxsts); 1068 #if 0 1069 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd", 1070 currdscr, consumer - rxq->rxq_first); 1071 #endif 1072 1073 /* 1074 * Get the count of descriptors. Fetch the correct number 1075 * of mbufs. 1076 */ 1077 #ifdef BCMETH_RCVMAGIC 1078 size_t desc_count = rxsts != BCMETH_RCVMAGIC ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1; 1079 #else 1080 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1; 1081 #endif 1082 struct mbuf *m = rxq->rxq_mhead; 1083 struct mbuf *m_last = m; 1084 for (size_t i = 1; i < desc_count; i++) { 1085 if (++consumer == rxq->rxq_last) { 1086 consumer = rxq->rxq_first; 1087 } 1088 KASSERTMSG(consumer != rxq->rxq_first + currdscr, 1089 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u consumer=%zd", 1090 i, rxsts, desc_count, currdscr, 1091 consumer - rxq->rxq_first); 1092 m_last = m_last->m_next; 1093 } 1094 1095 /* 1096 * Now remove it/them from the list of enqueued mbufs. 1097 */ 1098 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1099 rxq->rxq_mtail = &rxq->rxq_mhead; 1100 m_last->m_next = NULL; 1101 1102 #ifdef BCMETH_RCVMAGIC 1103 if (rxsts == BCMETH_RCVMAGIC) { 1104 ifp->if_ierrors++; 1105 if ((m->m_ext.ext_paddr >> 28) == 8) { 1106 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo); 1107 } else { 1108 BCMETH_EVCNT_INCR( sc->sc_ev_rx_badmagic_hi); 1109 } 1110 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1111 } else 1112 #endif /* BCMETH_RCVMAGIC */ 1113 if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) { 1114 aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n", 1115 consumer - rxq->rxq_first, desc_count, rxsts); 1116 /* 1117 * We encountered an error, take the mbufs and add them 1118 * to the rx bufcache so we can quickly reuse them. 1119 */ 1120 ifp->if_ierrors++; 1121 do { 1122 struct mbuf *m0 = m->m_next; 1123 m->m_next = NULL; 1124 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1125 m = m0; 1126 } while (m); 1127 } else { 1128 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN); 1129 framelen += sc->sc_rcvoffset; 1130 m->m_pkthdr.len = framelen; 1131 if (desc_count == 1) { 1132 KASSERT(framelen <= MCLBYTES); 1133 m->m_len = framelen; 1134 } else { 1135 m_last->m_len = framelen & (MCLBYTES - 1); 1136 } 1137 1138 #ifdef BCMETH_MPSAFE 1139 /* 1140 * Wrap at the last entry! 1141 */ 1142 if (++consumer == rxq->rxq_last) { 1143 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET)); 1144 rxq->rxq_consumer = rxq->rxq_first; 1145 } else { 1146 rxq->rxq_consumer = consumer; 1147 } 1148 rxq->rxq_inuse -= rxconsumed; 1149 #endif /* BCMETH_MPSAFE */ 1150 1151 /* 1152 * Receive the packet (which releases our lock) 1153 */ 1154 bcmeth_rx_input(sc, m, rxsts); 1155 1156 #ifdef BCMETH_MPSAFE 1157 /* 1158 * Since we had to give up our lock, we need to 1159 * refresh these. 1160 */ 1161 consumer = rxq->rxq_consumer; 1162 rxconsumed = 0; 1163 continue; 1164 #endif /* BCMETH_MPSAFE */ 1165 } 1166 1167 /* 1168 * Wrap at the last entry! 1169 */ 1170 if (++consumer == rxq->rxq_last) { 1171 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET)); 1172 consumer = rxq->rxq_first; 1173 } 1174 } 1175 1176 /* 1177 * Update queue info. 
1178 */ 1179 rxq->rxq_consumer = consumer; 1180 rxq->rxq_inuse -= rxconsumed; 1181 1182 /* 1183 * Did we consume anything? 1184 */ 1185 return didconsume; 1186 } 1187 1188 static void 1189 bcmeth_rxq_purge( 1190 struct bcmeth_softc *sc, 1191 struct bcmeth_rxqueue *rxq, 1192 bool discard) 1193 { 1194 struct mbuf *m; 1195 1196 if ((m = rxq->rxq_mhead) != NULL) { 1197 if (discard) { 1198 bcmeth_rx_map_unload(sc, m); 1199 m_freem(m); 1200 } else { 1201 while (m != NULL) { 1202 struct mbuf *m0 = m->m_next; 1203 m->m_next = NULL; 1204 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1205 m = m0; 1206 } 1207 } 1208 1209 } 1210 1211 rxq->rxq_mhead = NULL; 1212 rxq->rxq_mtail = &rxq->rxq_mhead; 1213 rxq->rxq_inuse = 0; 1214 } 1215 1216 static void 1217 bcmeth_rxq_reset( 1218 struct bcmeth_softc *sc, 1219 struct bcmeth_rxqueue *rxq) 1220 { 1221 /* 1222 * sync all the descriptors 1223 */ 1224 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1225 rxq->rxq_last - rxq->rxq_first); 1226 1227 /* 1228 * Make sure we own all descriptors in the ring. 1229 */ 1230 struct gmac_rxdb *rxdb; 1231 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) { 1232 rxdb->rxdb_flags = htole32(RXDB_FLAG_IC); 1233 } 1234 1235 /* 1236 * Last descriptor has the wrap flag. 1237 */ 1238 rxdb->rxdb_flags = htole32(RXDB_FLAG_ET|RXDB_FLAG_IC); 1239 1240 /* 1241 * Reset the producer consumer indexes. 1242 */ 1243 rxq->rxq_consumer = rxq->rxq_first; 1244 rxq->rxq_producer = rxq->rxq_first; 1245 rxq->rxq_inuse = 0; 1246 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS) 1247 rxq->rxq_threshold = BCMETH_MINRXMBUFS; 1248 1249 sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF; 1250 1251 /* 1252 * Restart the receiver at the first descriptor 1253 */ 1254 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo, 1255 rxq->rxq_descmap->dm_segs[0].ds_addr); 1256 } 1257 1258 static int 1259 bcmeth_rxq_attach( 1260 struct bcmeth_softc *sc, 1261 struct bcmeth_rxqueue *rxq, 1262 u_int qno) 1263 { 1264 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]); 1265 int error; 1266 void *descs; 1267 1268 KASSERT(desc_count == 256 || desc_count == 512); 1269 1270 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1271 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1272 if (error) 1273 return error; 1274 1275 memset(descs, 0, BCMETH_RINGSIZE); 1276 rxq->rxq_first = descs; 1277 rxq->rxq_last = rxq->rxq_first + desc_count; 1278 rxq->rxq_consumer = descs; 1279 rxq->rxq_producer = descs; 1280 1281 bcmeth_rxq_purge(sc, rxq, true); 1282 bcmeth_rxq_reset(sc, rxq); 1283 1284 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW; 1285 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL; 1286 rxq->rxq_reg_rcvptr = GMAC_RCVPTR; 1287 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0; 1288 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1; 1289 1290 return 0; 1291 } 1292 1293 static bool 1294 bcmeth_txq_active_p( 1295 struct bcmeth_softc * const sc, 1296 struct bcmeth_txqueue *txq) 1297 { 1298 return !IF_IS_EMPTY(&txq->txq_mbufs); 1299 } 1300 1301 static bool 1302 bcmeth_txq_fillable_p( 1303 struct bcmeth_softc * const sc, 1304 struct bcmeth_txqueue *txq) 1305 { 1306 return txq->txq_free >= txq->txq_threshold; 1307 } 1308 1309 static int 1310 bcmeth_txq_attach( 1311 struct bcmeth_softc *sc, 1312 struct bcmeth_txqueue *txq, 1313 u_int qno) 1314 { 1315 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]); 1316 int error; 1317 void *descs; 1318 1319 KASSERT(desc_count == 256 || desc_count == 512); 1320 1321 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1322 &txq->txq_descmap_seg, &txq->txq_descmap, 
&descs); 1323 if (error) 1324 return error; 1325 1326 memset(descs, 0, BCMETH_RINGSIZE); 1327 txq->txq_first = descs; 1328 txq->txq_last = txq->txq_first + desc_count; 1329 txq->txq_consumer = descs; 1330 txq->txq_producer = descs; 1331 1332 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS); 1333 1334 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW; 1335 txq->txq_reg_xmtctl = GMAC_XMTCONTROL; 1336 txq->txq_reg_xmtptr = GMAC_XMTPTR; 1337 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0; 1338 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1; 1339 1340 bcmeth_txq_reset(sc, txq); 1341 1342 return 0; 1343 } 1344 1345 static int 1346 bcmeth_txq_map_load( 1347 struct bcmeth_softc *sc, 1348 struct bcmeth_txqueue *txq, 1349 struct mbuf *m) 1350 { 1351 bus_dmamap_t map; 1352 int error; 1353 1354 map = M_GETCTX(m, bus_dmamap_t); 1355 if (map != NULL) 1356 return 0; 1357 1358 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache); 1359 if (map == NULL) 1360 return ENOMEM; 1361 1362 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1363 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1364 if (error) 1365 return error; 1366 1367 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1368 BUS_DMASYNC_PREWRITE); 1369 M_SETCTX(m, map); 1370 return 0; 1371 } 1372 1373 static void 1374 bcmeth_txq_map_unload( 1375 struct bcmeth_softc *sc, 1376 struct bcmeth_txqueue *txq, 1377 struct mbuf *m) 1378 { 1379 KASSERT(m); 1380 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1381 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1382 BUS_DMASYNC_POSTWRITE); 1383 bus_dmamap_unload(sc->sc_dmat, map); 1384 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map); 1385 } 1386 1387 static bool 1388 bcmeth_txq_produce( 1389 struct bcmeth_softc *sc, 1390 struct bcmeth_txqueue *txq, 1391 struct mbuf *m) 1392 { 1393 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1394 1395 if (map->dm_nsegs > txq->txq_free) 1396 return false; 1397 1398 /* 1399 * TCP Offload flag must be set in the first descriptor. 1400 */ 1401 struct gmac_txdb *producer = txq->txq_producer; 1402 uint32_t first_flags = TXDB_FLAG_SF; 1403 uint32_t last_flags = TXDB_FLAG_EF; 1404 1405 /* 1406 * If we've produced enough descriptors without consuming any 1407 * we need to ask for an interrupt to reclaim some. 
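	 * (txq_lastintr counts segments queued since we last asked for a
	 * completion interrupt; once it reaches txq_threshold, or the mbuf
	 * queue is about to fill, TXDB_FLAG_IC is set on the last
	 * descriptor of this packet so the transmitter interrupts us.)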
1408 */ 1409 txq->txq_lastintr += map->dm_nsegs; 1410 if (txq->txq_lastintr >= txq->txq_threshold 1411 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1412 txq->txq_lastintr = 0; 1413 last_flags |= TXDB_FLAG_IC; 1414 } 1415 1416 KASSERT(producer != txq->txq_last); 1417 1418 struct gmac_txdb *start = producer; 1419 size_t count = map->dm_nsegs; 1420 producer->txdb_flags |= htole32(first_flags); 1421 producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr); 1422 producer->txdb_buflen = htole32(map->dm_segs[0].ds_len); 1423 for (u_int i = 1; i < map->dm_nsegs; i++) { 1424 #if 0 1425 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1426 le32toh(producer->txdb_flags), 1427 le32toh(producer->txdb_buflen), 1428 le32toh(producer->txdb_addrlo), 1429 le32toh(producer->txdb_addrhi)); 1430 #endif 1431 if (__predict_false(++producer == txq->txq_last)) { 1432 bcmeth_txq_desc_presync(sc, txq, start, 1433 txq->txq_last - start); 1434 count -= txq->txq_last - start; 1435 producer = txq->txq_first; 1436 start = txq->txq_first; 1437 } 1438 producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr); 1439 producer->txdb_buflen = htole32(map->dm_segs[i].ds_len); 1440 } 1441 producer->txdb_flags |= htole32(last_flags); 1442 #if 0 1443 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1444 le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen), 1445 le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi)); 1446 #endif 1447 if (count) 1448 bcmeth_txq_desc_presync(sc, txq, start, count); 1449 1450 /* 1451 * Reduce free count by the number of segments we consumed. 1452 */ 1453 txq->txq_free -= map->dm_nsegs; 1454 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1455 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0); 1456 KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF)); 1457 1458 #if 0 1459 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n", 1460 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1461 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1462 #endif 1463 1464 if (producer + 1 == txq->txq_last) 1465 txq->txq_producer = txq->txq_first; 1466 else 1467 txq->txq_producer = producer + 1; 1468 IF_ENQUEUE(&txq->txq_mbufs, m); 1469 1470 /* 1471 * Let the transmitter know there's more to do 1472 */ 1473 bcmeth_write_4(sc, txq->txq_reg_xmtptr, 1474 txq->txq_descmap->dm_segs[0].ds_addr 1475 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR)); 1476 1477 return true; 1478 } 1479 1480 static struct mbuf * 1481 bcmeth_copy_packet(struct mbuf *m) 1482 { 1483 struct mbuf *mext = NULL; 1484 size_t misalignment = 0; 1485 size_t hlen = 0; 1486 1487 for (mext = m; mext != NULL; mext = mext->m_next) { 1488 if (mext->m_flags & M_EXT) { 1489 misalignment = mtod(mext, vaddr_t) & arm_dcache_align; 1490 break; 1491 } 1492 hlen += m->m_len; 1493 } 1494 1495 struct mbuf *n = m->m_next; 1496 if (m != mext && hlen + misalignment <= MHLEN && false) { 1497 KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]); 1498 size_t oldoff = m->m_data - m->m_pktdat; 1499 size_t off; 1500 if (mext == NULL) { 1501 off = (oldoff + hlen > MHLEN) ? 
0 : oldoff; 1502 } else { 1503 off = MHLEN - (hlen + misalignment); 1504 } 1505 KASSERT(off + hlen + misalignment <= MHLEN); 1506 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) { 1507 memmove(&m->m_pktdat[off], m->m_data, m->m_len); 1508 m->m_data = &m->m_pktdat[off]; 1509 } 1510 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]); 1511 m->m_len = hlen; 1512 m->m_next = mext; 1513 while (n != mext) { 1514 n = m_free(n); 1515 } 1516 return m; 1517 } 1518 1519 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type); 1520 if (m0 == NULL) { 1521 return NULL; 1522 } 1523 M_COPY_PKTHDR(m0, m); 1524 MCLAIM(m0, m->m_owner); 1525 if (m0->m_pkthdr.len > MHLEN) { 1526 MCLGET(m0, M_DONTWAIT); 1527 if ((m0->m_flags & M_EXT) == 0) { 1528 m_freem(m0); 1529 return NULL; 1530 } 1531 } 1532 m0->m_len = m->m_pkthdr.len; 1533 m_copydata(m, 0, m0->m_len, mtod(m0, void *)); 1534 m_freem(m); 1535 return m0; 1536 } 1537 1538 static bool 1539 bcmeth_txq_enqueue( 1540 struct bcmeth_softc *sc, 1541 struct bcmeth_txqueue *txq) 1542 { 1543 for (;;) { 1544 if (IF_QFULL(&txq->txq_mbufs)) 1545 return false; 1546 struct mbuf *m = txq->txq_next; 1547 if (m == NULL) { 1548 int s = splnet(); 1549 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1550 splx(s); 1551 if (m == NULL) 1552 return true; 1553 M_SETCTX(m, NULL); 1554 } else { 1555 txq->txq_next = NULL; 1556 } 1557 /* 1558 * If LINK2 is set and this packet uses multiple mbufs, 1559 * consolidate it into a single mbuf. 1560 */ 1561 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) { 1562 struct mbuf *m0 = bcmeth_copy_packet(m); 1563 if (m0 == NULL) { 1564 txq->txq_next = m; 1565 return true; 1566 } 1567 m = m0; 1568 } 1569 int error = bcmeth_txq_map_load(sc, txq, m); 1570 if (error) { 1571 aprint_error_dev(sc->sc_dev, 1572 "discarded packet due to " 1573 "dmamap load failure: %d\n", error); 1574 m_freem(m); 1575 continue; 1576 } 1577 KASSERT(txq->txq_next == NULL); 1578 if (!bcmeth_txq_produce(sc, txq, m)) { 1579 txq->txq_next = m; 1580 return false; 1581 } 1582 KASSERT(txq->txq_next == NULL); 1583 } 1584 } 1585 1586 static bool 1587 bcmeth_txq_consume( 1588 struct bcmeth_softc *sc, 1589 struct bcmeth_txqueue *txq) 1590 { 1591 struct ifnet * const ifp = &sc->sc_if; 1592 struct gmac_txdb *consumer = txq->txq_consumer; 1593 size_t txfree = 0; 1594 1595 #if 0 1596 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 1597 #endif 1598 1599 for (;;) { 1600 if (consumer == txq->txq_producer) { 1601 txq->txq_consumer = consumer; 1602 txq->txq_free += txfree; 1603 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 1604 #if 0 1605 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n", 1606 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 1607 #endif 1608 KASSERT(txq->txq_lastintr == 0); 1609 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 1610 return true; 1611 } 1612 bcmeth_txq_desc_postsync(sc, txq, consumer, 1); 1613 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 1614 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) { 1615 txq->txq_consumer = consumer; 1616 txq->txq_free += txfree; 1617 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 1618 #if 0 1619 printf("%s: freed %zu descriptors\n", 1620 __func__, txfree); 1621 #endif 1622 return bcmeth_txq_fillable_p(sc, txq); 1623 } 1624 1625 /* 1626 * If this is the last descriptor in the chain, get the 1627 * mbuf, free its dmamap, and free the mbuf chain itself. 
1628 */ 1629 const uint32_t txdb_flags = le32toh(consumer->txdb_flags); 1630 if (txdb_flags & TXDB_FLAG_EF) { 1631 struct mbuf *m; 1632 1633 IF_DEQUEUE(&txq->txq_mbufs, m); 1634 KASSERT(m); 1635 bcmeth_txq_map_unload(sc, txq, m); 1636 #if 0 1637 printf("%s: mbuf %p: consumed a %u byte packet\n", 1638 __func__, m, m->m_pkthdr.len); 1639 #endif 1640 bpf_mtap(ifp, m); 1641 ifp->if_opackets++; 1642 ifp->if_obytes += m->m_pkthdr.len; 1643 if (m->m_flags & M_MCAST) 1644 ifp->if_omcasts++; 1645 m_freem(m); 1646 } 1647 1648 /* 1649 * We own this packet again. Clear all flags except wrap. 1650 */ 1651 txfree++; 1652 1653 /* 1654 * Wrap at the last entry! 1655 */ 1656 if (txdb_flags & TXDB_FLAG_ET) { 1657 consumer->txdb_flags = htole32(TXDB_FLAG_ET); 1658 KASSERT(consumer + 1 == txq->txq_last); 1659 consumer = txq->txq_first; 1660 } else { 1661 consumer->txdb_flags = 0; 1662 consumer++; 1663 KASSERT(consumer < txq->txq_last); 1664 } 1665 } 1666 } 1667 1668 static void 1669 bcmeth_txq_purge( 1670 struct bcmeth_softc *sc, 1671 struct bcmeth_txqueue *txq) 1672 { 1673 struct mbuf *m; 1674 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0); 1675 1676 for (;;) { 1677 IF_DEQUEUE(&txq->txq_mbufs, m); 1678 if (m == NULL) 1679 break; 1680 bcmeth_txq_map_unload(sc, txq, m); 1681 m_freem(m); 1682 } 1683 if ((m = txq->txq_next) != NULL) { 1684 txq->txq_next = NULL; 1685 bcmeth_txq_map_unload(sc, txq, m); 1686 m_freem(m); 1687 } 1688 } 1689 1690 static void 1691 bcmeth_txq_reset( 1692 struct bcmeth_softc *sc, 1693 struct bcmeth_txqueue *txq) 1694 { 1695 /* 1696 * sync all the descriptors 1697 */ 1698 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first, 1699 txq->txq_last - txq->txq_first); 1700 1701 /* 1702 * Make sure we own all descriptors in the ring. 1703 */ 1704 struct gmac_txdb *txdb; 1705 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) { 1706 txdb->txdb_flags = 0; 1707 } 1708 1709 /* 1710 * Last descriptor has the wrap flag. 1711 */ 1712 txdb->txdb_flags = htole32(TXDB_FLAG_ET); 1713 1714 /* 1715 * Reset the producer consumer indexes. 1716 */ 1717 txq->txq_consumer = txq->txq_first; 1718 txq->txq_producer = txq->txq_first; 1719 txq->txq_free = txq->txq_last - txq->txq_first - 1; 1720 txq->txq_threshold = txq->txq_free / 2; 1721 txq->txq_lastintr = 0; 1722 1723 /* 1724 * What do we want to get interrupted on? 1725 */ 1726 sc->sc_intmask |= XMTINT_0 | XMTUF; 1727 1728 /* 1729 * Restart the transmiter at the first descriptor 1730 */ 1731 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo, 1732 txq->txq_descmap->dm_segs->ds_addr); 1733 } 1734 1735 static void 1736 bcmeth_ifstart(struct ifnet *ifp) 1737 { 1738 struct bcmeth_softc * const sc = ifp->if_softc; 1739 1740 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { 1741 return; 1742 } 1743 1744 #ifdef BCMETH_MPSAFETX 1745 if (cpu_intr_p()) { 1746 #endif 1747 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 1748 softint_schedule(sc->sc_soft_ih); 1749 #ifdef BCMETH_MPSAFETX 1750 } else { 1751 /* 1752 * Either we are in a softintr thread already or some other 1753 * thread so just borrow it to do the send and save ourselves 1754 * the overhead of a fast soft int. 
1755 */ 1756 bcmeth_soft_txintr(sc); 1757 } 1758 #endif 1759 } 1760 1761 int 1762 bcmeth_intr(void *arg) 1763 { 1764 struct bcmeth_softc * const sc = arg; 1765 uint32_t soft_flags = 0; 1766 uint32_t work_flags = 0; 1767 int rv = 0; 1768 1769 mutex_enter(sc->sc_hwlock); 1770 1771 uint32_t intmask = sc->sc_intmask; 1772 BCMETH_EVCNT_INCR(sc->sc_ev_intr); 1773 1774 for (;;) { 1775 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 1776 intstatus &= intmask; 1777 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */ 1778 if (intstatus == 0) { 1779 break; 1780 } 1781 #if 0 1782 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n", 1783 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK)); 1784 #endif 1785 if (intstatus & RCVINT) { 1786 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1787 intmask &= ~RCVINT; 1788 1789 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1790 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1791 if (descs < rxq->rxq_consumer - rxq->rxq_first) { 1792 /* 1793 * We wrapped at the end so count how far 1794 * we are from the end. 1795 */ 1796 descs += rxq->rxq_last - rxq->rxq_consumer; 1797 } else { 1798 descs -= rxq->rxq_consumer - rxq->rxq_first; 1799 } 1800 /* 1801 * If we "timedout" we can't be hogging so use 1802 * softints. If we exceeded then we might hogging 1803 * so let the workqueue deal with them. 1804 */ 1805 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT); 1806 if (descs < framecount 1807 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) { 1808 soft_flags |= SOFT_RXINTR; 1809 } else { 1810 work_flags |= WORK_RXINTR; 1811 } 1812 } 1813 1814 if (intstatus & XMTINT_0) { 1815 intmask &= ~XMTINT_0; 1816 soft_flags |= SOFT_TXINTR; 1817 } 1818 1819 if (intstatus & RCVDESCUF) { 1820 intmask &= ~RCVDESCUF; 1821 work_flags |= WORK_RXUNDERFLOW; 1822 } 1823 1824 intstatus &= intmask; 1825 if (intstatus) { 1826 aprint_error_dev(sc->sc_dev, 1827 "intr: intstatus=%#x\n", intstatus); 1828 aprint_error_dev(sc->sc_dev, 1829 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n", 1830 sc->sc_rxq.rxq_first, 1831 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr, 1832 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr), 1833 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0), 1834 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1)); 1835 aprint_error_dev(sc->sc_dev, 1836 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n", 1837 sc->sc_txq.txq_first, 1838 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr, 1839 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr), 1840 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0), 1841 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1)); 1842 intmask &= ~intstatus; 1843 work_flags |= WORK_REINIT; 1844 break; 1845 } 1846 } 1847 1848 if (intmask != sc->sc_intmask) { 1849 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1850 } 1851 1852 if (work_flags) { 1853 if (sc->sc_work_flags == 0) { 1854 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL); 1855 } 1856 atomic_or_32(&sc->sc_work_flags, work_flags); 1857 rv = 1; 1858 } 1859 1860 if (soft_flags) { 1861 if (sc->sc_soft_flags == 0) { 1862 softint_schedule(sc->sc_soft_ih); 1863 } 1864 atomic_or_32(&sc->sc_soft_flags, soft_flags); 1865 rv = 1; 1866 } 1867 1868 mutex_exit(sc->sc_hwlock); 1869 1870 return rv; 1871 } 1872 1873 #ifdef BCMETH_MPSAFETX 1874 void 1875 bcmeth_soft_txintr(struct bcmeth_softc *sc) 1876 { 1877 mutex_enter(sc->sc_lock); 1878 /* 1879 * Let's do what we came here for. Consume transmitted 1880 * packets off the the transmit ring. 
1881 */ 1882 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1883 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1884 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1885 sc->sc_if.if_flags |= IFF_OACTIVE; 1886 } else { 1887 sc->sc_if.if_flags &= ~IFF_OACTIVE; 1888 } 1889 if (sc->sc_if.if_flags & IFF_RUNNING) { 1890 mutex_spin_enter(sc->sc_hwlock); 1891 sc->sc_intmask |= XMTINT_0; 1892 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1893 mutex_spin_exit(sc->sc_hwlock); 1894 } 1895 mutex_exit(sc->sc_lock); 1896 } 1897 #endif /* BCMETH_MPSAFETX */ 1898 1899 void 1900 bcmeth_soft_intr(void *arg) 1901 { 1902 struct bcmeth_softc * const sc = arg; 1903 struct ifnet * const ifp = &sc->sc_if; 1904 uint32_t intmask = 0; 1905 1906 mutex_enter(sc->sc_lock); 1907 1908 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 1909 1910 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr); 1911 1912 if ((soft_flags & SOFT_TXINTR) 1913 || bcmeth_txq_active_p(sc, &sc->sc_txq)) { 1914 /* 1915 * Let's do what we came here for. Consume transmitted 1916 * packets off the the transmit ring. 1917 */ 1918 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1919 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1920 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1921 ifp->if_flags |= IFF_OACTIVE; 1922 } else { 1923 ifp->if_flags &= ~IFF_OACTIVE; 1924 } 1925 intmask |= XMTINT_0; 1926 } 1927 1928 if (soft_flags & SOFT_RXINTR) { 1929 /* 1930 * Let's consume 1931 */ 1932 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 1933 sc->sc_rxq.rxq_threshold / 4)) { 1934 /* 1935 * We've consumed a quarter of the ring and still have 1936 * more to do. Refill the ring. 1937 */ 1938 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1939 } 1940 intmask |= RCVINT; 1941 } 1942 1943 if (ifp->if_flags & IFF_RUNNING) { 1944 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1945 mutex_spin_enter(sc->sc_hwlock); 1946 sc->sc_intmask |= intmask; 1947 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1948 mutex_spin_exit(sc->sc_hwlock); 1949 } 1950 1951 mutex_exit(sc->sc_lock); 1952 } 1953 1954 void 1955 bcmeth_worker(struct work *wk, void *arg) 1956 { 1957 struct bcmeth_softc * const sc = arg; 1958 struct ifnet * const ifp = &sc->sc_if; 1959 uint32_t intmask = 0; 1960 1961 mutex_enter(sc->sc_lock); 1962 1963 BCMETH_EVCNT_INCR(sc->sc_ev_work); 1964 1965 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0); 1966 if (work_flags & WORK_REINIT) { 1967 int s = splnet(); 1968 sc->sc_soft_flags = 0; 1969 bcmeth_ifinit(ifp); 1970 splx(s); 1971 work_flags &= ~WORK_RXUNDERFLOW; 1972 } 1973 1974 if (work_flags & WORK_RXUNDERFLOW) { 1975 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1976 size_t threshold = 5 * rxq->rxq_threshold / 4; 1977 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 1978 threshold = rxq->rxq_last - rxq->rxq_first - 1; 1979 } else { 1980 intmask |= RCVDESCUF; 1981 } 1982 aprint_normal_dev(sc->sc_dev, 1983 "increasing receive buffers from %zu to %zu\n", 1984 rxq->rxq_threshold, threshold); 1985 rxq->rxq_threshold = threshold; 1986 } 1987 1988 if (work_flags & WORK_RXINTR) { 1989 /* 1990 * Let's consume 1991 */ 1992 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 1993 sc->sc_rxq.rxq_threshold / 4)) { 1994 /* 1995 * We've consumed a quarter of the ring and still have 1996 * more to do. Refill the ring. 
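			 * (Producing new buffers inside the loop keeps the
			 * hardware supplied while we keep draining a busy
			 * ring, instead of refilling only once at the end.)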
1997 */ 1998 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1999 } 2000 intmask |= RCVINT; 2001 } 2002 2003 if (ifp->if_flags & IFF_RUNNING) { 2004 bcmeth_rxq_produce(sc, &sc->sc_rxq); 2005 #if 0 2006 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 2007 if (intstatus & RCVINT) { 2008 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT); 2009 work_flags |= WORK_RXINTR; 2010 continue; 2011 } 2012 #endif 2013 mutex_spin_enter(sc->sc_hwlock); 2014 sc->sc_intmask |= intmask; 2015 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 2016 mutex_spin_exit(sc->sc_hwlock); 2017 } 2018 2019 mutex_exit(sc->sc_lock); 2020 } 2021