/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE
#define GMAC_PRIVATE

#include "locators.h"
#include "opt_broadcom.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: bcm53xx_eth.c,v 1.30 2017/10/23 09:23:25 msaitoh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/if_dl.h>

#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <arm/locore.h>

#include <arm/broadcom/bcm53xx_reg.h>
#include <arm/broadcom/bcm53xx_var.h>

//#define BCMETH_MPSAFE

#ifdef BCMETH_COUNTERS
#define	BCMETH_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	BCMETH_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	BCMETH_EVCNT_INCR(a)	BCMETH_EVCNT_ADD((a), 1)

#define	BCMETH_MAXTXMBUFS	128
#define	BCMETH_NTXSEGS		30
#define	BCMETH_MAXRXMBUFS	255
#define	BCMETH_MINRXMBUFS	64
#define	BCMETH_NRXSEGS		1
#define	BCMETH_RINGSIZE		PAGE_SIZE

#if 1
#define	BCMETH_RCVMAGIC		0xfeedface
#endif

static int bcmeth_ccb_match(device_t, cfdata_t, void *);
static void bcmeth_ccb_attach(device_t, device_t, void *);

struct bcmeth_txqueue {
	bus_dmamap_t txq_descmap;
	struct gmac_txdb *txq_consumer;
	struct gmac_txdb *txq_producer;
	struct gmac_txdb *txq_first;
	struct gmac_txdb *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
	size_t txq_free;
	size_t txq_threshold;
	size_t txq_lastintr;
	bus_size_t txq_reg_xmtaddrlo;
	bus_size_t txq_reg_xmtptr;
	bus_size_t txq_reg_xmtctl;
	bus_size_t txq_reg_xmtsts0;
	bus_size_t txq_reg_xmtsts1;
	bus_dma_segment_t txq_descmap_seg;
};

struct
bcmeth_rxqueue { 110 bus_dmamap_t rxq_descmap; 111 struct gmac_rxdb *rxq_consumer; 112 struct gmac_rxdb *rxq_producer; 113 struct gmac_rxdb *rxq_first; 114 struct gmac_rxdb *rxq_last; 115 struct mbuf *rxq_mhead; 116 struct mbuf **rxq_mtail; 117 struct mbuf *rxq_mconsumer; 118 size_t rxq_inuse; 119 size_t rxq_threshold; 120 bus_size_t rxq_reg_rcvaddrlo; 121 bus_size_t rxq_reg_rcvptr; 122 bus_size_t rxq_reg_rcvctl; 123 bus_size_t rxq_reg_rcvsts0; 124 bus_size_t rxq_reg_rcvsts1; 125 bus_dma_segment_t rxq_descmap_seg; 126 }; 127 128 struct bcmeth_mapcache { 129 u_int dmc_nmaps; 130 u_int dmc_maxseg; 131 u_int dmc_maxmaps; 132 u_int dmc_maxmapsize; 133 bus_dmamap_t dmc_maps[0]; 134 }; 135 136 struct bcmeth_softc { 137 device_t sc_dev; 138 bus_space_tag_t sc_bst; 139 bus_space_handle_t sc_bsh; 140 bus_dma_tag_t sc_dmat; 141 kmutex_t *sc_lock; 142 kmutex_t *sc_hwlock; 143 struct ethercom sc_ec; 144 #define sc_if sc_ec.ec_if 145 struct ifmedia sc_media; 146 void *sc_soft_ih; 147 void *sc_ih; 148 149 struct bcmeth_rxqueue sc_rxq; 150 struct bcmeth_txqueue sc_txq; 151 152 size_t sc_rcvoffset; 153 uint32_t sc_macaddr[2]; 154 uint32_t sc_maxfrm; 155 uint32_t sc_cmdcfg; 156 uint32_t sc_intmask; 157 uint32_t sc_rcvlazy; 158 volatile uint32_t sc_soft_flags; 159 #define SOFT_RXINTR 0x01 160 #define SOFT_TXINTR 0x02 161 162 #ifdef BCMETH_COUNTERS 163 struct evcnt sc_ev_intr; 164 struct evcnt sc_ev_soft_intr; 165 struct evcnt sc_ev_work; 166 struct evcnt sc_ev_tx_stall; 167 struct evcnt sc_ev_rx_badmagic_lo; 168 struct evcnt sc_ev_rx_badmagic_hi; 169 #endif 170 171 struct ifqueue sc_rx_bufcache; 172 struct bcmeth_mapcache *sc_rx_mapcache; 173 struct bcmeth_mapcache *sc_tx_mapcache; 174 175 struct workqueue *sc_workq; 176 struct work sc_work; 177 178 volatile uint32_t sc_work_flags; 179 #define WORK_RXINTR 0x01 180 #define WORK_RXUNDERFLOW 0x02 181 #define WORK_REINIT 0x04 182 183 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 184 }; 185 186 static void bcmeth_ifstart(struct ifnet *); 187 static void bcmeth_ifwatchdog(struct ifnet *); 188 static int bcmeth_ifinit(struct ifnet *); 189 static void bcmeth_ifstop(struct ifnet *, int); 190 static int bcmeth_ifioctl(struct ifnet *, u_long, void *); 191 192 static int bcmeth_mapcache_create(struct bcmeth_softc *, 193 struct bcmeth_mapcache **, size_t, size_t, size_t); 194 static void bcmeth_mapcache_destroy(struct bcmeth_softc *, 195 struct bcmeth_mapcache *); 196 static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *, 197 struct bcmeth_mapcache *); 198 static void bcmeth_mapcache_put(struct bcmeth_softc *, 199 struct bcmeth_mapcache *, bus_dmamap_t); 200 201 static int bcmeth_txq_attach(struct bcmeth_softc *, 202 struct bcmeth_txqueue *, u_int); 203 static void bcmeth_txq_purge(struct bcmeth_softc *, 204 struct bcmeth_txqueue *); 205 static void bcmeth_txq_reset(struct bcmeth_softc *, 206 struct bcmeth_txqueue *); 207 static bool bcmeth_txq_consume(struct bcmeth_softc *, 208 struct bcmeth_txqueue *); 209 static bool bcmeth_txq_produce(struct bcmeth_softc *, 210 struct bcmeth_txqueue *, struct mbuf *m); 211 static bool bcmeth_txq_active_p(struct bcmeth_softc *, 212 struct bcmeth_txqueue *); 213 214 static int bcmeth_rxq_attach(struct bcmeth_softc *, 215 struct bcmeth_rxqueue *, u_int); 216 static bool bcmeth_rxq_produce(struct bcmeth_softc *, 217 struct bcmeth_rxqueue *); 218 static void bcmeth_rxq_purge(struct bcmeth_softc *, 219 struct bcmeth_rxqueue *, bool); 220 static void bcmeth_rxq_reset(struct bcmeth_softc *, 221 struct bcmeth_rxqueue *); 222 223 static int 
bcmeth_intr(void *);
#ifdef BCMETH_MPSAFETX
static void bcmeth_soft_txintr(struct bcmeth_softc *);
#endif
static void bcmeth_soft_intr(void *);
static void bcmeth_worker(struct work *, void *);

static int bcmeth_mediachange(struct ifnet *);
static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);

static inline uint32_t
bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
}

static inline void
bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
}

CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
	bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);

static int
bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
{
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	if (strcmp(cf->cf_name, loc->loc_name))
		return 0;

#ifdef DIAGNOSTIC
	const int port = cf->cf_loc[BCMCCBCF_PORT];
#endif
	KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);

	return 1;
}

static void
bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmeth_softc * const sc = device_private(self);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
	const char * const xname = device_xname(self);
	prop_dictionary_t dict = device_properties(self);
	int error;

	sc->sc_bst = ccbaa->ccbaa_ccb_bst;
	sc->sc_dmat = ccbaa->ccbaa_dmat;
	bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	/*
	 * We need to use the coherent dma tag for the GMAC.
	 */
	sc->sc_dmat = &bcm53xx_coherent_dma_tag;
#if _ARM32_NEED_BUS_DMA_BOUNCE
	if (device_cfdata(self)->cf_flags & 2) {
		sc->sc_dmat = &bcm53xx_bounce_dma_tag;
	}
#endif

	prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
	if (eaprop == NULL) {
		uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
		uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
		if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
			aprint_error(": mac-address property is missing\n");
			return;
		}
		sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
		sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
		sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
		sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
		sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
		sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}
	sc->sc_dev = self;
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	bcmeth_write_4(sc, GMAC_INTMASK, 0);	// disable interrupts

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");

	error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		goto fail_1;
	}

	error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		goto fail_1;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
	    BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		goto fail_1;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
	    BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		goto fail_1;
	}

	error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
	    (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		aprint_error(": failed to create workqueue: %d\n", error);
		goto fail_2;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
	    bcmeth_soft_intr, sc);

	if (sc->sc_soft_ih == NULL) {
		aprint_error_dev(self, "failed to establish soft interrupt\n");
		goto fail_3;
	}

	sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
	    bcmeth_intr, sc);

	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intrs[0]);
		goto fail_4;
	} else {
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    loc->loc_intrs[0]);
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Since each port is plugged into the switch/flow-accelerator,
	 * we hard-code it at GigE Full-Duplex with Flow Control enabled.
	 */
	int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
	//ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
	ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
	    bcmeth_mediastatus);
	ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
	ifmedia_set(&sc->sc_media, ifmedia);

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_baudrate = IF_Mbps(1000);
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef BCMETH_MPSAFE
	ifp->if_flags2 = IFF2_MPSAFE;
#endif
	ifp->if_ioctl = bcmeth_ifioctl;
	ifp->if_start = bcmeth_ifstart;
	ifp->if_watchdog = bcmeth_ifwatchdog;
	ifp->if_init = bcmeth_ifinit;
	ifp->if_stop = bcmeth_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	bcmeth_ifstop(ifp, true);

	/*
	 * Attach the interface.
	 */
	error = if_initialize(ifp);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
		    error);
		goto fail_5;
	}
	ether_ifattach(ifp, sc->sc_enaddr);
	if_register(ifp);

#ifdef BCMETH_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
	    NULL, xname, "work items");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stalls");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic lo");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic hi");
#endif

	return;

fail_5:
	ifmedia_removeall(&sc->sc_media);
fail_4:
	intr_disestablish(sc->sc_ih);
fail_3:
	softint_disestablish(sc->sc_soft_ih);
fail_2:
	workqueue_destroy(sc->sc_workq);
fail_1:
	mutex_obj_free(sc->sc_lock);
	mutex_obj_free(sc->sc_hwlock);
}

static int
bcmeth_mediachange(struct ifnet *ifp)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;
	return 0;
}

static void
bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;

	ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
}

static uint64_t
bcmeth_macaddr_create(const uint8_t *enaddr)
{
	return (enaddr[3] << 0)			// UNIMAC_MAC_0
	    | (enaddr[2] << 8)			// UNIMAC_MAC_0
	    | (enaddr[1] << 16)			// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[0] << 24)	// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[5] << 32)	// UNIMAC_MAC_1
	    | ((uint64_t)enaddr[4] << 40);	// UNIMAC_MAC_1
}

static int
bcmeth_ifinit(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface
	 */
	bcmeth_ifstop(ifp, 0);

	/*
	 * Reserve enough space at the front so that we can insert a max-sized
	 * link header and a VLAN tag.  Also make sure we have enough room for
	 * the rcvsts field as well.
	 */
	KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
	KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
	    max_linkhdr, sizeof(struct ether_header));
	sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
	if (sc->sc_rcvoffset <= 4)
		sc->sc_rcvoffset += 4;
	KASSERT((sc->sc_rcvoffset & 3) == 2);
	KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
	KASSERT(sc->sc_rcvoffset >= 6);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
		    BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
	    | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
	    | RX_ENA | TX_ENA;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_cmdcfg |= PROMISC_EN;
	} else {
		sc->sc_cmdcfg &= ~PROMISC_EN;
	}

	const uint8_t * const lladdr = CLLADDR(ifp->if_sadl);
	const uint64_t macstnaddr = bcmeth_macaddr_create(lladdr);

	/*
	 * We make sure that a received Ethernet packet starts on a non-word
	 * boundary so that the packet payload will be on a word boundary.
	 * So to check the destination address we keep around two words to
	 * quickly compare with.
	 */
#if __ARMEL__
	sc->sc_macaddr[0] = lladdr[0] | (lladdr[1] << 8);
	sc->sc_macaddr[1] = lladdr[2] | (lladdr[3] << 8)
	    | (lladdr[4] << 16) | (lladdr[5] << 24);
#else
	sc->sc_macaddr[0] = lladdr[1] | (lladdr[0] << 8);
	sc->sc_macaddr[1] = lladdr[5] | (lladdr[4] << 8)
	    | (lladdr[3] << 16) | (lladdr[2] << 24);
#endif

	sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;

	/* 5. Load RCVADDR_LO with new pointer */
	bcmeth_rxq_reset(sc, &sc->sc_rxq);

	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
	    | RCVCTL_PARITY_DIS
	    | RCVCTL_OFLOW_CONTINUE
	    | __SHIFTIN(3, RCVCTL_BURSTLEN));

	/* 6. Load XMTADDR_LO with new pointer */
	bcmeth_txq_reset(sc, &sc->sc_txq);

	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
	    | XMTCTL_PARITY_DIS
	    | __SHIFTIN(3, XMTCTL_BURSTLEN));

	/* 7. Setup other UNIMAC registers */
	bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
	bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
	bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);

	uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
	devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
	devctl &= ~FLOW_CTRL_MODE;
	devctl &= ~MIB_RD_RESET_EN;
	devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
	devctl &= ~CPU_FLOW_CTRL_ON;
	bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);

	/* Setup lazy receive (at most 1ms). */
	const struct cpu_softc * const cpu = curcpu()->ci_softc;
	sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
	    | __SHIFTIN(cpu->cpu_clk.clk_apb / 1000, INTRCVLAZY_TIMEOUT);
	bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);

	/* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL.
*/ 594 sc->sc_intmask |= XMTINT_0|XMTUF; 595 bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, 596 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE); 597 598 599 /* 12. Enable receive queues in RQUEUE, */ 600 sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF; 601 bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl, 602 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE); 603 604 bcmeth_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 605 606 #if 0 607 aprint_normal_dev(sc->sc_dev, 608 "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n", 609 devctl, sc->sc_cmdcfg, 610 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl), 611 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl)); 612 #endif 613 614 sc->sc_soft_flags = 0; 615 616 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 617 618 ifp->if_flags |= IFF_RUNNING; 619 620 return error; 621 } 622 623 static void 624 bcmeth_ifstop(struct ifnet *ifp, int disable) 625 { 626 struct bcmeth_softc * const sc = ifp->if_softc; 627 struct bcmeth_txqueue * const txq = &sc->sc_txq; 628 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 629 630 KASSERT(!cpu_intr_p()); 631 632 sc->sc_soft_flags = 0; 633 sc->sc_work_flags = 0; 634 635 /* Disable Rx processing */ 636 bcmeth_write_4(sc, rxq->rxq_reg_rcvctl, 637 bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE); 638 639 /* Disable Tx processing */ 640 bcmeth_write_4(sc, txq->txq_reg_xmtctl, 641 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE); 642 643 /* Disable all interrupts */ 644 bcmeth_write_4(sc, GMAC_INTMASK, 0); 645 646 for (;;) { 647 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 648 uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 649 if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS 650 && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS) 651 break; 652 delay(50); 653 } 654 /* 655 * Now reset the controller. 656 * 657 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register 658 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register 659 */ 660 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET); 661 bcmeth_write_4(sc, GMAC_INTSTATUS, ~0); 662 sc->sc_intmask = 0; 663 ifp->if_flags &= ~IFF_RUNNING; 664 665 /* 666 * Let's consume any remaining transmitted packets. And if we are 667 * disabling the interface, purge ourselves of any untransmitted 668 * packets. But don't consume any received packets, just drop them. 669 * If we aren't disabling the interface, save the mbufs in the 670 * receive queue for reuse. 
671 */ 672 bcmeth_rxq_purge(sc, &sc->sc_rxq, disable); 673 bcmeth_txq_consume(sc, &sc->sc_txq); 674 if (disable) { 675 bcmeth_txq_purge(sc, &sc->sc_txq); 676 IF_PURGE(&ifp->if_snd); 677 } 678 679 bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0); 680 } 681 682 static void 683 bcmeth_ifwatchdog(struct ifnet *ifp) 684 { 685 } 686 687 static int 688 bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 689 { 690 struct bcmeth_softc *sc = ifp->if_softc; 691 struct ifreq * const ifr = data; 692 const int s = splnet(); 693 int error; 694 695 switch (cmd) { 696 case SIOCSIFMEDIA: 697 case SIOCGIFMEDIA: 698 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 699 break; 700 701 default: 702 error = ether_ioctl(ifp, cmd, data); 703 if (error != ENETRESET) 704 break; 705 706 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 707 error = 0; 708 break; 709 } 710 error = bcmeth_ifinit(ifp); 711 break; 712 } 713 714 splx(s); 715 return error; 716 } 717 718 static void 719 bcmeth_rxq_desc_presync( 720 struct bcmeth_softc *sc, 721 struct bcmeth_rxqueue *rxq, 722 struct gmac_rxdb *rxdb, 723 size_t count) 724 { 725 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 726 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 727 BUS_DMASYNC_PREWRITE); 728 } 729 730 static void 731 bcmeth_rxq_desc_postsync( 732 struct bcmeth_softc *sc, 733 struct bcmeth_rxqueue *rxq, 734 struct gmac_rxdb *rxdb, 735 size_t count) 736 { 737 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 738 (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb), 739 BUS_DMASYNC_POSTWRITE); 740 } 741 742 static void 743 bcmeth_txq_desc_presync( 744 struct bcmeth_softc *sc, 745 struct bcmeth_txqueue *txq, 746 struct gmac_txdb *txdb, 747 size_t count) 748 { 749 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 750 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 751 BUS_DMASYNC_PREWRITE); 752 } 753 754 static void 755 bcmeth_txq_desc_postsync( 756 struct bcmeth_softc *sc, 757 struct bcmeth_txqueue *txq, 758 struct gmac_txdb *txdb, 759 size_t count) 760 { 761 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 762 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb), 763 BUS_DMASYNC_POSTWRITE); 764 } 765 766 static bus_dmamap_t 767 bcmeth_mapcache_get( 768 struct bcmeth_softc *sc, 769 struct bcmeth_mapcache *dmc) 770 { 771 KASSERT(dmc->dmc_nmaps > 0); 772 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 773 return dmc->dmc_maps[--dmc->dmc_nmaps]; 774 } 775 776 static void 777 bcmeth_mapcache_put( 778 struct bcmeth_softc *sc, 779 struct bcmeth_mapcache *dmc, 780 bus_dmamap_t map) 781 { 782 KASSERT(map != NULL); 783 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 784 dmc->dmc_maps[dmc->dmc_nmaps++] = map; 785 } 786 787 static void 788 bcmeth_mapcache_destroy( 789 struct bcmeth_softc *sc, 790 struct bcmeth_mapcache *dmc) 791 { 792 const size_t dmc_size = 793 offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]); 794 795 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 796 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 797 } 798 kmem_intr_free(dmc, dmc_size); 799 } 800 801 static int 802 bcmeth_mapcache_create( 803 struct bcmeth_softc *sc, 804 struct bcmeth_mapcache **dmc_p, 805 size_t maxmaps, 806 size_t maxmapsize, 807 size_t maxseg) 808 { 809 const size_t dmc_size = 810 offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]); 811 struct bcmeth_mapcache * const dmc = 812 kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 813 814 dmc->dmc_maxmaps = maxmaps; 815 dmc->dmc_nmaps = maxmaps; 816 dmc->dmc_maxmapsize = maxmapsize; 817 
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
bcmeth_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
bcmeth_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
bcmeth_rx_buf_alloc(
	struct bcmeth_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
#ifdef BCMETH_RCVMAGIC
	*mtod(m, uint32_t *) = htole32(BCMETH_RCVMAGIC);
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#else
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#endif

	return m;
}

static void
bcmeth_rx_map_unload(
	struct bcmeth_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
bcmeth_rxq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	struct gmac_rxdb *producer = rxq->rxq_producer;
	bool produced = false;

	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = bcmeth_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

		producer->rxdb_buflen = htole32(MCLBYTES);
		producer->rxdb_addrlo = htole32(map->dm_segs[0].ds_addr);
		producer->rxdb_flags &= htole32(RXDB_FLAG_ET);
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
		produced = true;
	}
	if (produced) {
		membar_producer();
		if (producer != rxq->rxq_producer) {
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    producer - rxq->rxq_producer);
			rxq->rxq_producer = producer;
		}
		bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
		    rxq->rxq_descmap->dm_segs[0].ds_addr
		    + ((uintptr_t)producer & RCVPTR));
	}
	return true;
}

static void
bcmeth_rx_input(
	struct bcmeth_softc *sc,
	struct mbuf *m,
	uint32_t rxdb_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	bcmeth_rx_map_unload(sc, m);

	m_adj(m, sc->sc_rcvoffset);

	/*
	 * If we are in promiscuous mode and this isn't a multicast, check the
	 * destination address to make sure it matches our own.  If it doesn't,
	 * mark the packet as being received promiscuously.
	 */
	if ((sc->sc_cmdcfg & PROMISC_EN)
	    && (m->m_data[0] & 1) == 0
	    && (*(uint16_t *)&m->m_data[0] != sc->sc_macaddr[0]
		|| *(uint32_t *)&m->m_data[2] != sc->sc_macaddr[1])) {
		m->m_flags |= M_PROMISC;
	}
	m_set_rcvif(m, ifp);

	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
#ifdef BCMETH_MPSAFE
	mutex_exit(sc->sc_lock);
	if_input(ifp, m);
	mutex_enter(sc->sc_lock);
#else
	int s = splnet();
	if_input(ifp, m);
	splx(s);
#endif
}

static bool
bcmeth_rxq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	size_t atmost)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_rxdb *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;
	bool didconsume = false;

	while (atmost-- > 0) {
		if (consumer == rxq->rxq_producer) {
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}

		uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
		if (consumer == rxq->rxq_first + currdscr) {
			break;
		}
		bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);

		/*
		 * We own this packet again.  Copy the rxsts word from it.
1084 */ 1085 rxconsumed++; 1086 didconsume = true; 1087 uint32_t rxsts; 1088 KASSERT(rxq->rxq_mhead != NULL); 1089 bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t); 1090 bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align, 1091 BUS_DMASYNC_POSTREAD); 1092 memcpy(&rxsts, rxq->rxq_mhead->m_data, 4); 1093 rxsts = le32toh(rxsts); 1094 #if 0 1095 KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd", 1096 currdscr, consumer - rxq->rxq_first); 1097 #endif 1098 1099 /* 1100 * Get the count of descriptors. Fetch the correct number 1101 * of mbufs. 1102 */ 1103 #ifdef BCMETH_RCVMAGIC 1104 size_t desc_count = rxsts != BCMETH_RCVMAGIC ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1; 1105 #else 1106 size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1; 1107 #endif 1108 struct mbuf *m = rxq->rxq_mhead; 1109 struct mbuf *m_last = m; 1110 for (size_t i = 1; i < desc_count; i++) { 1111 if (++consumer == rxq->rxq_last) { 1112 consumer = rxq->rxq_first; 1113 } 1114 KASSERTMSG(consumer != rxq->rxq_first + currdscr, 1115 "i=%zu rxsts=%#x desc_count=%zu currdscr=%u consumer=%zd", 1116 i, rxsts, desc_count, currdscr, 1117 consumer - rxq->rxq_first); 1118 m_last = m_last->m_next; 1119 } 1120 1121 /* 1122 * Now remove it/them from the list of enqueued mbufs. 1123 */ 1124 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1125 rxq->rxq_mtail = &rxq->rxq_mhead; 1126 m_last->m_next = NULL; 1127 1128 #ifdef BCMETH_RCVMAGIC 1129 if (rxsts == BCMETH_RCVMAGIC) { 1130 ifp->if_ierrors++; 1131 if ((m->m_ext.ext_paddr >> 28) == 8) { 1132 BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo); 1133 } else { 1134 BCMETH_EVCNT_INCR( sc->sc_ev_rx_badmagic_hi); 1135 } 1136 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1137 } else 1138 #endif /* BCMETH_RCVMAGIC */ 1139 if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) { 1140 aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n", 1141 consumer - rxq->rxq_first, desc_count, rxsts); 1142 /* 1143 * We encountered an error, take the mbufs and add them 1144 * to the rx bufcache so we can quickly reuse them. 1145 */ 1146 ifp->if_ierrors++; 1147 do { 1148 struct mbuf *m0 = m->m_next; 1149 m->m_next = NULL; 1150 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1151 m = m0; 1152 } while (m); 1153 } else { 1154 uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN); 1155 framelen += sc->sc_rcvoffset; 1156 m->m_pkthdr.len = framelen; 1157 if (desc_count == 1) { 1158 KASSERT(framelen <= MCLBYTES); 1159 m->m_len = framelen; 1160 } else { 1161 m_last->m_len = framelen & (MCLBYTES - 1); 1162 } 1163 1164 #ifdef BCMETH_MPSAFE 1165 /* 1166 * Wrap at the last entry! 1167 */ 1168 if (++consumer == rxq->rxq_last) { 1169 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET)); 1170 rxq->rxq_consumer = rxq->rxq_first; 1171 } else { 1172 rxq->rxq_consumer = consumer; 1173 } 1174 rxq->rxq_inuse -= rxconsumed; 1175 #endif /* BCMETH_MPSAFE */ 1176 1177 /* 1178 * Receive the packet (which releases our lock) 1179 */ 1180 bcmeth_rx_input(sc, m, rxsts); 1181 1182 #ifdef BCMETH_MPSAFE 1183 /* 1184 * Since we had to give up our lock, we need to 1185 * refresh these. 1186 */ 1187 consumer = rxq->rxq_consumer; 1188 rxconsumed = 0; 1189 continue; 1190 #endif /* BCMETH_MPSAFE */ 1191 } 1192 1193 /* 1194 * Wrap at the last entry! 1195 */ 1196 if (++consumer == rxq->rxq_last) { 1197 KASSERT(consumer[-1].rxdb_flags & htole32(RXDB_FLAG_ET)); 1198 consumer = rxq->rxq_first; 1199 } 1200 } 1201 1202 /* 1203 * Update queue info. 
1204 */ 1205 rxq->rxq_consumer = consumer; 1206 rxq->rxq_inuse -= rxconsumed; 1207 1208 /* 1209 * Did we consume anything? 1210 */ 1211 return didconsume; 1212 } 1213 1214 static void 1215 bcmeth_rxq_purge( 1216 struct bcmeth_softc *sc, 1217 struct bcmeth_rxqueue *rxq, 1218 bool discard) 1219 { 1220 struct mbuf *m; 1221 1222 if ((m = rxq->rxq_mhead) != NULL) { 1223 if (discard) { 1224 bcmeth_rx_map_unload(sc, m); 1225 m_freem(m); 1226 } else { 1227 while (m != NULL) { 1228 struct mbuf *m0 = m->m_next; 1229 m->m_next = NULL; 1230 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1231 m = m0; 1232 } 1233 } 1234 1235 } 1236 1237 rxq->rxq_mhead = NULL; 1238 rxq->rxq_mtail = &rxq->rxq_mhead; 1239 rxq->rxq_inuse = 0; 1240 } 1241 1242 static void 1243 bcmeth_rxq_reset( 1244 struct bcmeth_softc *sc, 1245 struct bcmeth_rxqueue *rxq) 1246 { 1247 /* 1248 * sync all the descriptors 1249 */ 1250 bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1251 rxq->rxq_last - rxq->rxq_first); 1252 1253 /* 1254 * Make sure we own all descriptors in the ring. 1255 */ 1256 struct gmac_rxdb *rxdb; 1257 for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) { 1258 rxdb->rxdb_flags = htole32(RXDB_FLAG_IC); 1259 } 1260 1261 /* 1262 * Last descriptor has the wrap flag. 1263 */ 1264 rxdb->rxdb_flags = htole32(RXDB_FLAG_ET|RXDB_FLAG_IC); 1265 1266 /* 1267 * Reset the producer consumer indexes. 1268 */ 1269 rxq->rxq_consumer = rxq->rxq_first; 1270 rxq->rxq_producer = rxq->rxq_first; 1271 rxq->rxq_inuse = 0; 1272 if (rxq->rxq_threshold < BCMETH_MINRXMBUFS) 1273 rxq->rxq_threshold = BCMETH_MINRXMBUFS; 1274 1275 sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF; 1276 1277 /* 1278 * Restart the receiver at the first descriptor 1279 */ 1280 bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo, 1281 rxq->rxq_descmap->dm_segs[0].ds_addr); 1282 } 1283 1284 static int 1285 bcmeth_rxq_attach( 1286 struct bcmeth_softc *sc, 1287 struct bcmeth_rxqueue *rxq, 1288 u_int qno) 1289 { 1290 size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]); 1291 int error; 1292 void *descs; 1293 1294 KASSERT(desc_count == 256 || desc_count == 512); 1295 1296 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1297 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1298 if (error) 1299 return error; 1300 1301 memset(descs, 0, BCMETH_RINGSIZE); 1302 rxq->rxq_first = descs; 1303 rxq->rxq_last = rxq->rxq_first + desc_count; 1304 rxq->rxq_consumer = descs; 1305 rxq->rxq_producer = descs; 1306 1307 bcmeth_rxq_purge(sc, rxq, true); 1308 bcmeth_rxq_reset(sc, rxq); 1309 1310 rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW; 1311 rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL; 1312 rxq->rxq_reg_rcvptr = GMAC_RCVPTR; 1313 rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0; 1314 rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1; 1315 1316 return 0; 1317 } 1318 1319 static bool 1320 bcmeth_txq_active_p( 1321 struct bcmeth_softc * const sc, 1322 struct bcmeth_txqueue *txq) 1323 { 1324 return !IF_IS_EMPTY(&txq->txq_mbufs); 1325 } 1326 1327 static bool 1328 bcmeth_txq_fillable_p( 1329 struct bcmeth_softc * const sc, 1330 struct bcmeth_txqueue *txq) 1331 { 1332 return txq->txq_free >= txq->txq_threshold; 1333 } 1334 1335 static int 1336 bcmeth_txq_attach( 1337 struct bcmeth_softc *sc, 1338 struct bcmeth_txqueue *txq, 1339 u_int qno) 1340 { 1341 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]); 1342 int error; 1343 void *descs; 1344 1345 KASSERT(desc_count == 256 || desc_count == 512); 1346 1347 error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE, 1348 &txq->txq_descmap_seg, &txq->txq_descmap, 
&descs); 1349 if (error) 1350 return error; 1351 1352 memset(descs, 0, BCMETH_RINGSIZE); 1353 txq->txq_first = descs; 1354 txq->txq_last = txq->txq_first + desc_count; 1355 txq->txq_consumer = descs; 1356 txq->txq_producer = descs; 1357 1358 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS); 1359 1360 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW; 1361 txq->txq_reg_xmtctl = GMAC_XMTCONTROL; 1362 txq->txq_reg_xmtptr = GMAC_XMTPTR; 1363 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0; 1364 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1; 1365 1366 bcmeth_txq_reset(sc, txq); 1367 1368 return 0; 1369 } 1370 1371 static int 1372 bcmeth_txq_map_load( 1373 struct bcmeth_softc *sc, 1374 struct bcmeth_txqueue *txq, 1375 struct mbuf *m) 1376 { 1377 bus_dmamap_t map; 1378 int error; 1379 1380 map = M_GETCTX(m, bus_dmamap_t); 1381 if (map != NULL) 1382 return 0; 1383 1384 map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache); 1385 if (map == NULL) 1386 return ENOMEM; 1387 1388 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1389 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1390 if (error) 1391 return error; 1392 1393 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1394 BUS_DMASYNC_PREWRITE); 1395 M_SETCTX(m, map); 1396 return 0; 1397 } 1398 1399 static void 1400 bcmeth_txq_map_unload( 1401 struct bcmeth_softc *sc, 1402 struct bcmeth_txqueue *txq, 1403 struct mbuf *m) 1404 { 1405 KASSERT(m); 1406 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1407 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1408 BUS_DMASYNC_POSTWRITE); 1409 bus_dmamap_unload(sc->sc_dmat, map); 1410 bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map); 1411 } 1412 1413 static bool 1414 bcmeth_txq_produce( 1415 struct bcmeth_softc *sc, 1416 struct bcmeth_txqueue *txq, 1417 struct mbuf *m) 1418 { 1419 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1420 1421 if (map->dm_nsegs > txq->txq_free) 1422 return false; 1423 1424 /* 1425 * TCP Offload flag must be set in the first descriptor. 1426 */ 1427 struct gmac_txdb *producer = txq->txq_producer; 1428 uint32_t first_flags = TXDB_FLAG_SF; 1429 uint32_t last_flags = TXDB_FLAG_EF; 1430 1431 /* 1432 * If we've produced enough descriptors without consuming any 1433 * we need to ask for an interrupt to reclaim some. 
1434 */ 1435 txq->txq_lastintr += map->dm_nsegs; 1436 if (txq->txq_lastintr >= txq->txq_threshold 1437 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1438 txq->txq_lastintr = 0; 1439 last_flags |= TXDB_FLAG_IC; 1440 } 1441 1442 KASSERT(producer != txq->txq_last); 1443 1444 struct gmac_txdb *start = producer; 1445 size_t count = map->dm_nsegs; 1446 producer->txdb_flags |= htole32(first_flags); 1447 producer->txdb_addrlo = htole32(map->dm_segs[0].ds_addr); 1448 producer->txdb_buflen = htole32(map->dm_segs[0].ds_len); 1449 for (u_int i = 1; i < map->dm_nsegs; i++) { 1450 #if 0 1451 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1452 le32toh(producer->txdb_flags), 1453 le32toh(producer->txdb_buflen), 1454 le32toh(producer->txdb_addrlo), 1455 le32toh(producer->txdb_addrhi)); 1456 #endif 1457 if (__predict_false(++producer == txq->txq_last)) { 1458 bcmeth_txq_desc_presync(sc, txq, start, 1459 txq->txq_last - start); 1460 count -= txq->txq_last - start; 1461 producer = txq->txq_first; 1462 start = txq->txq_first; 1463 } 1464 producer->txdb_addrlo = htole32(map->dm_segs[i].ds_addr); 1465 producer->txdb_buflen = htole32(map->dm_segs[i].ds_len); 1466 } 1467 producer->txdb_flags |= htole32(last_flags); 1468 #if 0 1469 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first, 1470 le32toh(producer->txdb_flags), le32toh(producer->txdb_buflen), 1471 le32toh(producer->txdb_addrlo), le32toh(producer->txdb_addrhi)); 1472 #endif 1473 if (count) 1474 bcmeth_txq_desc_presync(sc, txq, start, count); 1475 1476 /* 1477 * Reduce free count by the number of segments we consumed. 1478 */ 1479 txq->txq_free -= map->dm_nsegs; 1480 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1481 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0); 1482 KASSERT(producer->txdb_flags & htole32(TXDB_FLAG_EF)); 1483 1484 #if 0 1485 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n", 1486 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1487 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1488 #endif 1489 1490 if (producer + 1 == txq->txq_last) 1491 txq->txq_producer = txq->txq_first; 1492 else 1493 txq->txq_producer = producer + 1; 1494 IF_ENQUEUE(&txq->txq_mbufs, m); 1495 1496 /* 1497 * Let the transmitter know there's more to do 1498 */ 1499 bcmeth_write_4(sc, txq->txq_reg_xmtptr, 1500 txq->txq_descmap->dm_segs[0].ds_addr 1501 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR)); 1502 1503 return true; 1504 } 1505 1506 static struct mbuf * 1507 bcmeth_copy_packet(struct mbuf *m) 1508 { 1509 struct mbuf *mext = NULL; 1510 size_t misalignment = 0; 1511 size_t hlen = 0; 1512 1513 for (mext = m; mext != NULL; mext = mext->m_next) { 1514 if (mext->m_flags & M_EXT) { 1515 misalignment = mtod(mext, vaddr_t) & arm_dcache_align; 1516 break; 1517 } 1518 hlen += m->m_len; 1519 } 1520 1521 struct mbuf *n = m->m_next; 1522 if (m != mext && hlen + misalignment <= MHLEN && false) { 1523 KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]); 1524 size_t oldoff = m->m_data - m->m_pktdat; 1525 size_t off; 1526 if (mext == NULL) { 1527 off = (oldoff + hlen > MHLEN) ? 
0 : oldoff; 1528 } else { 1529 off = MHLEN - (hlen + misalignment); 1530 } 1531 KASSERT(off + hlen + misalignment <= MHLEN); 1532 if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) { 1533 memmove(&m->m_pktdat[off], m->m_data, m->m_len); 1534 m->m_data = &m->m_pktdat[off]; 1535 } 1536 m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]); 1537 m->m_len = hlen; 1538 m->m_next = mext; 1539 while (n != mext) { 1540 n = m_free(n); 1541 } 1542 return m; 1543 } 1544 1545 struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type); 1546 if (m0 == NULL) { 1547 return NULL; 1548 } 1549 M_COPY_PKTHDR(m0, m); 1550 MCLAIM(m0, m->m_owner); 1551 if (m0->m_pkthdr.len > MHLEN) { 1552 MCLGET(m0, M_DONTWAIT); 1553 if ((m0->m_flags & M_EXT) == 0) { 1554 m_freem(m0); 1555 return NULL; 1556 } 1557 } 1558 m0->m_len = m->m_pkthdr.len; 1559 m_copydata(m, 0, m0->m_len, mtod(m0, void *)); 1560 m_freem(m); 1561 return m0; 1562 } 1563 1564 static bool 1565 bcmeth_txq_enqueue( 1566 struct bcmeth_softc *sc, 1567 struct bcmeth_txqueue *txq) 1568 { 1569 for (;;) { 1570 if (IF_QFULL(&txq->txq_mbufs)) 1571 return false; 1572 struct mbuf *m = txq->txq_next; 1573 if (m == NULL) { 1574 int s = splnet(); 1575 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1576 splx(s); 1577 if (m == NULL) 1578 return true; 1579 M_SETCTX(m, NULL); 1580 } else { 1581 txq->txq_next = NULL; 1582 } 1583 /* 1584 * If LINK2 is set and this packet uses multiple mbufs, 1585 * consolidate it into a single mbuf. 1586 */ 1587 if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) { 1588 struct mbuf *m0 = bcmeth_copy_packet(m); 1589 if (m0 == NULL) { 1590 txq->txq_next = m; 1591 return true; 1592 } 1593 m = m0; 1594 } 1595 int error = bcmeth_txq_map_load(sc, txq, m); 1596 if (error) { 1597 aprint_error_dev(sc->sc_dev, 1598 "discarded packet due to " 1599 "dmamap load failure: %d\n", error); 1600 m_freem(m); 1601 continue; 1602 } 1603 KASSERT(txq->txq_next == NULL); 1604 if (!bcmeth_txq_produce(sc, txq, m)) { 1605 txq->txq_next = m; 1606 return false; 1607 } 1608 KASSERT(txq->txq_next == NULL); 1609 } 1610 } 1611 1612 static bool 1613 bcmeth_txq_consume( 1614 struct bcmeth_softc *sc, 1615 struct bcmeth_txqueue *txq) 1616 { 1617 struct ifnet * const ifp = &sc->sc_if; 1618 struct gmac_txdb *consumer = txq->txq_consumer; 1619 size_t txfree = 0; 1620 1621 #if 0 1622 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 1623 #endif 1624 1625 for (;;) { 1626 if (consumer == txq->txq_producer) { 1627 txq->txq_consumer = consumer; 1628 txq->txq_free += txfree; 1629 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 1630 #if 0 1631 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n", 1632 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 1633 #endif 1634 KASSERT(txq->txq_lastintr == 0); 1635 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 1636 return true; 1637 } 1638 bcmeth_txq_desc_postsync(sc, txq, consumer, 1); 1639 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0); 1640 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) { 1641 txq->txq_consumer = consumer; 1642 txq->txq_free += txfree; 1643 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 1644 #if 0 1645 printf("%s: freed %zu descriptors\n", 1646 __func__, txfree); 1647 #endif 1648 return bcmeth_txq_fillable_p(sc, txq); 1649 } 1650 1651 /* 1652 * If this is the last descriptor in the chain, get the 1653 * mbuf, free its dmamap, and free the mbuf chain itself. 
1654 */ 1655 const uint32_t txdb_flags = le32toh(consumer->txdb_flags); 1656 if (txdb_flags & TXDB_FLAG_EF) { 1657 struct mbuf *m; 1658 1659 IF_DEQUEUE(&txq->txq_mbufs, m); 1660 KASSERT(m); 1661 bcmeth_txq_map_unload(sc, txq, m); 1662 #if 0 1663 printf("%s: mbuf %p: consumed a %u byte packet\n", 1664 __func__, m, m->m_pkthdr.len); 1665 #endif 1666 bpf_mtap(ifp, m); 1667 ifp->if_opackets++; 1668 ifp->if_obytes += m->m_pkthdr.len; 1669 if (m->m_flags & M_MCAST) 1670 ifp->if_omcasts++; 1671 m_freem(m); 1672 } 1673 1674 /* 1675 * We own this packet again. Clear all flags except wrap. 1676 */ 1677 txfree++; 1678 1679 /* 1680 * Wrap at the last entry! 1681 */ 1682 if (txdb_flags & TXDB_FLAG_ET) { 1683 consumer->txdb_flags = htole32(TXDB_FLAG_ET); 1684 KASSERT(consumer + 1 == txq->txq_last); 1685 consumer = txq->txq_first; 1686 } else { 1687 consumer->txdb_flags = 0; 1688 consumer++; 1689 KASSERT(consumer < txq->txq_last); 1690 } 1691 } 1692 } 1693 1694 static void 1695 bcmeth_txq_purge( 1696 struct bcmeth_softc *sc, 1697 struct bcmeth_txqueue *txq) 1698 { 1699 struct mbuf *m; 1700 KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0); 1701 1702 for (;;) { 1703 IF_DEQUEUE(&txq->txq_mbufs, m); 1704 if (m == NULL) 1705 break; 1706 bcmeth_txq_map_unload(sc, txq, m); 1707 m_freem(m); 1708 } 1709 if ((m = txq->txq_next) != NULL) { 1710 txq->txq_next = NULL; 1711 bcmeth_txq_map_unload(sc, txq, m); 1712 m_freem(m); 1713 } 1714 } 1715 1716 static void 1717 bcmeth_txq_reset( 1718 struct bcmeth_softc *sc, 1719 struct bcmeth_txqueue *txq) 1720 { 1721 /* 1722 * sync all the descriptors 1723 */ 1724 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first, 1725 txq->txq_last - txq->txq_first); 1726 1727 /* 1728 * Make sure we own all descriptors in the ring. 1729 */ 1730 struct gmac_txdb *txdb; 1731 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) { 1732 txdb->txdb_flags = 0; 1733 } 1734 1735 /* 1736 * Last descriptor has the wrap flag. 1737 */ 1738 txdb->txdb_flags = htole32(TXDB_FLAG_ET); 1739 1740 /* 1741 * Reset the producer consumer indexes. 1742 */ 1743 txq->txq_consumer = txq->txq_first; 1744 txq->txq_producer = txq->txq_first; 1745 txq->txq_free = txq->txq_last - txq->txq_first - 1; 1746 txq->txq_threshold = txq->txq_free / 2; 1747 txq->txq_lastintr = 0; 1748 1749 /* 1750 * What do we want to get interrupted on? 1751 */ 1752 sc->sc_intmask |= XMTINT_0 | XMTUF; 1753 1754 /* 1755 * Restart the transmiter at the first descriptor 1756 */ 1757 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo, 1758 txq->txq_descmap->dm_segs->ds_addr); 1759 } 1760 1761 static void 1762 bcmeth_ifstart(struct ifnet *ifp) 1763 { 1764 struct bcmeth_softc * const sc = ifp->if_softc; 1765 1766 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { 1767 return; 1768 } 1769 1770 #ifdef BCMETH_MPSAFETX 1771 if (cpu_intr_p()) { 1772 #endif 1773 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 1774 softint_schedule(sc->sc_soft_ih); 1775 #ifdef BCMETH_MPSAFETX 1776 } else { 1777 /* 1778 * Either we are in a softintr thread already or some other 1779 * thread so just borrow it to do the send and save ourselves 1780 * the overhead of a fast soft int. 
1781 */ 1782 bcmeth_soft_txintr(sc); 1783 } 1784 #endif 1785 } 1786 1787 int 1788 bcmeth_intr(void *arg) 1789 { 1790 struct bcmeth_softc * const sc = arg; 1791 uint32_t soft_flags = 0; 1792 uint32_t work_flags = 0; 1793 int rv = 0; 1794 1795 mutex_enter(sc->sc_hwlock); 1796 1797 uint32_t intmask = sc->sc_intmask; 1798 BCMETH_EVCNT_INCR(sc->sc_ev_intr); 1799 1800 for (;;) { 1801 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 1802 intstatus &= intmask; 1803 bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */ 1804 if (intstatus == 0) { 1805 break; 1806 } 1807 #if 0 1808 aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n", 1809 __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK)); 1810 #endif 1811 if (intstatus & RCVINT) { 1812 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 1813 intmask &= ~RCVINT; 1814 1815 uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0); 1816 uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR); 1817 if (descs < rxq->rxq_consumer - rxq->rxq_first) { 1818 /* 1819 * We wrapped at the end so count how far 1820 * we are from the end. 1821 */ 1822 descs += rxq->rxq_last - rxq->rxq_consumer; 1823 } else { 1824 descs -= rxq->rxq_consumer - rxq->rxq_first; 1825 } 1826 /* 1827 * If we "timedout" we can't be hogging so use 1828 * softints. If we exceeded then we might hogging 1829 * so let the workqueue deal with them. 1830 */ 1831 const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT); 1832 if (descs < framecount 1833 || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) { 1834 soft_flags |= SOFT_RXINTR; 1835 } else { 1836 work_flags |= WORK_RXINTR; 1837 } 1838 } 1839 1840 if (intstatus & XMTINT_0) { 1841 intmask &= ~XMTINT_0; 1842 soft_flags |= SOFT_TXINTR; 1843 } 1844 1845 if (intstatus & RCVDESCUF) { 1846 intmask &= ~RCVDESCUF; 1847 work_flags |= WORK_RXUNDERFLOW; 1848 } 1849 1850 intstatus &= intmask; 1851 if (intstatus) { 1852 aprint_error_dev(sc->sc_dev, 1853 "intr: intstatus=%#x\n", intstatus); 1854 aprint_error_dev(sc->sc_dev, 1855 "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n", 1856 sc->sc_rxq.rxq_first, 1857 sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr, 1858 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr), 1859 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0), 1860 bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1)); 1861 aprint_error_dev(sc->sc_dev, 1862 "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n", 1863 sc->sc_txq.txq_first, 1864 sc->sc_txq.txq_descmap->dm_segs[0].ds_addr, 1865 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr), 1866 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0), 1867 bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1)); 1868 intmask &= ~intstatus; 1869 work_flags |= WORK_REINIT; 1870 break; 1871 } 1872 } 1873 1874 if (intmask != sc->sc_intmask) { 1875 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1876 } 1877 1878 if (work_flags) { 1879 if (sc->sc_work_flags == 0) { 1880 workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL); 1881 } 1882 atomic_or_32(&sc->sc_work_flags, work_flags); 1883 rv = 1; 1884 } 1885 1886 if (soft_flags) { 1887 if (sc->sc_soft_flags == 0) { 1888 softint_schedule(sc->sc_soft_ih); 1889 } 1890 atomic_or_32(&sc->sc_soft_flags, soft_flags); 1891 rv = 1; 1892 } 1893 1894 mutex_exit(sc->sc_hwlock); 1895 1896 return rv; 1897 } 1898 1899 #ifdef BCMETH_MPSAFETX 1900 void 1901 bcmeth_soft_txintr(struct bcmeth_softc *sc) 1902 { 1903 mutex_enter(sc->sc_lock); 1904 /* 1905 * Let's do what we came here for. Consume transmitted 1906 * packets off the the transmit ring. 
1907 */ 1908 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1909 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1910 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1911 sc->sc_if.if_flags |= IFF_OACTIVE; 1912 } else { 1913 sc->sc_if.if_flags &= ~IFF_OACTIVE; 1914 } 1915 if (sc->sc_if.if_flags & IFF_RUNNING) { 1916 mutex_spin_enter(sc->sc_hwlock); 1917 sc->sc_intmask |= XMTINT_0; 1918 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1919 mutex_spin_exit(sc->sc_hwlock); 1920 } 1921 mutex_exit(sc->sc_lock); 1922 } 1923 #endif /* BCMETH_MPSAFETX */ 1924 1925 void 1926 bcmeth_soft_intr(void *arg) 1927 { 1928 struct bcmeth_softc * const sc = arg; 1929 struct ifnet * const ifp = &sc->sc_if; 1930 uint32_t intmask = 0; 1931 1932 mutex_enter(sc->sc_lock); 1933 1934 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 1935 1936 BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr); 1937 1938 if ((soft_flags & SOFT_TXINTR) 1939 || bcmeth_txq_active_p(sc, &sc->sc_txq)) { 1940 /* 1941 * Let's do what we came here for. Consume transmitted 1942 * packets off the the transmit ring. 1943 */ 1944 if (!bcmeth_txq_consume(sc, &sc->sc_txq) 1945 || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) { 1946 BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall); 1947 ifp->if_flags |= IFF_OACTIVE; 1948 } else { 1949 ifp->if_flags &= ~IFF_OACTIVE; 1950 } 1951 intmask |= XMTINT_0; 1952 } 1953 1954 if (soft_flags & SOFT_RXINTR) { 1955 /* 1956 * Let's consume 1957 */ 1958 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 1959 sc->sc_rxq.rxq_threshold / 4)) { 1960 /* 1961 * We've consumed a quarter of the ring and still have 1962 * more to do. Refill the ring. 1963 */ 1964 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1965 } 1966 intmask |= RCVINT; 1967 } 1968 1969 if (ifp->if_flags & IFF_RUNNING) { 1970 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1971 mutex_spin_enter(sc->sc_hwlock); 1972 sc->sc_intmask |= intmask; 1973 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1974 mutex_spin_exit(sc->sc_hwlock); 1975 } 1976 1977 mutex_exit(sc->sc_lock); 1978 } 1979 1980 void 1981 bcmeth_worker(struct work *wk, void *arg) 1982 { 1983 struct bcmeth_softc * const sc = arg; 1984 struct ifnet * const ifp = &sc->sc_if; 1985 uint32_t intmask = 0; 1986 1987 mutex_enter(sc->sc_lock); 1988 1989 BCMETH_EVCNT_INCR(sc->sc_ev_work); 1990 1991 uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0); 1992 if (work_flags & WORK_REINIT) { 1993 int s = splnet(); 1994 sc->sc_soft_flags = 0; 1995 bcmeth_ifinit(ifp); 1996 splx(s); 1997 work_flags &= ~WORK_RXUNDERFLOW; 1998 } 1999 2000 if (work_flags & WORK_RXUNDERFLOW) { 2001 struct bcmeth_rxqueue * const rxq = &sc->sc_rxq; 2002 size_t threshold = 5 * rxq->rxq_threshold / 4; 2003 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2004 threshold = rxq->rxq_last - rxq->rxq_first - 1; 2005 } else { 2006 intmask |= RCVDESCUF; 2007 } 2008 aprint_normal_dev(sc->sc_dev, 2009 "increasing receive buffers from %zu to %zu\n", 2010 rxq->rxq_threshold, threshold); 2011 rxq->rxq_threshold = threshold; 2012 } 2013 2014 if (work_flags & WORK_RXINTR) { 2015 /* 2016 * Let's consume 2017 */ 2018 while (bcmeth_rxq_consume(sc, &sc->sc_rxq, 2019 sc->sc_rxq.rxq_threshold / 4)) { 2020 /* 2021 * We've consumed a quarter of the ring and still have 2022 * more to do. Refill the ring. 
2023 */ 2024 bcmeth_rxq_produce(sc, &sc->sc_rxq); 2025 } 2026 intmask |= RCVINT; 2027 } 2028 2029 if (ifp->if_flags & IFF_RUNNING) { 2030 bcmeth_rxq_produce(sc, &sc->sc_rxq); 2031 #if 0 2032 uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS); 2033 if (intstatus & RCVINT) { 2034 bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT); 2035 work_flags |= WORK_RXINTR; 2036 continue; 2037 } 2038 #endif 2039 mutex_spin_enter(sc->sc_hwlock); 2040 sc->sc_intmask |= intmask; 2041 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 2042 mutex_spin_exit(sc->sc_hwlock); 2043 } 2044 2045 mutex_exit(sc->sc_lock); 2046 } 2047