1 /*- 2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $ 28 */ 29 30 #include "opt_ifpoll.h" 31 #include "opt_jme.h" 32 33 #include <sys/param.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/rman.h> 41 #include <sys/serialize.h> 42 #include <sys/serialize2.h> 43 #include <sys/socket.h> 44 #include <sys/sockio.h> 45 #include <sys/sysctl.h> 46 47 #include <net/ethernet.h> 48 #include <net/if.h> 49 #include <net/bpf.h> 50 #include <net/if_arp.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/if_poll.h> 54 #include <net/ifq_var.h> 55 #include <net/toeplitz.h> 56 #include <net/toeplitz2.h> 57 #include <net/vlan/if_vlan_var.h> 58 #include <net/vlan/if_vlan_ether.h> 59 60 #include <netinet/ip.h> 61 #include <netinet/tcp.h> 62 63 #include <dev/netif/mii_layer/miivar.h> 64 #include <dev/netif/mii_layer/jmphyreg.h> 65 66 #include <bus/pci/pcireg.h> 67 #include <bus/pci/pcivar.h> 68 #include <bus/pci/pcidevs.h> 69 70 #include <dev/netif/jme/if_jmereg.h> 71 #include <dev/netif/jme/if_jmevar.h> 72 73 #include "miibus_if.h" 74 75 #define JME_TICK_CPUID 0 /* DO NOT CHANGE THIS */ 76 77 #define JME_TX_SERIALIZE 1 78 #define JME_RX_SERIALIZE 2 79 80 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 81 82 #ifdef JME_RSS_DEBUG 83 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \ 84 do { \ 85 if ((sc)->jme_rss_debug >= (lvl)) \ 86 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \ 87 } while (0) 88 #else /* !JME_RSS_DEBUG */ 89 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 90 #endif /* JME_RSS_DEBUG */ 91 92 static int jme_probe(device_t); 93 static int jme_attach(device_t); 94 static int jme_detach(device_t); 95 static int jme_shutdown(device_t); 96 static int jme_suspend(device_t); 97 static int jme_resume(device_t); 98 99 static int jme_miibus_readreg(device_t, int, int); 100 static int jme_miibus_writereg(device_t, int, int, int); 101 static void jme_miibus_statchg(device_t); 102 103 static void jme_init(void *); 104 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 105 static void jme_start(struct ifnet *); 106 static void jme_watchdog(struct ifnet *); 107 static void jme_mediastatus(struct ifnet *, struct ifmediareq *); 108 static int jme_mediachange(struct ifnet *); 109 #ifdef IFPOLL_ENABLE 110 static void jme_npoll(struct ifnet *, struct ifpoll_info *); 111 static void jme_npoll_status(struct ifnet *); 112 static void jme_npoll_rx(struct ifnet *, void *, int); 113 static void jme_npoll_tx(struct ifnet *, void *, int); 114 #endif 115 static void jme_serialize(struct ifnet *, enum ifnet_serialize); 116 static void jme_deserialize(struct ifnet *, enum ifnet_serialize); 117 static int jme_tryserialize(struct ifnet *, enum ifnet_serialize); 118 #ifdef INVARIANTS 119 static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize, 120 boolean_t); 121 #endif 122 123 static void jme_intr(void *); 124 static void jme_msix_tx(void *); 125 static void jme_msix_rx(void *); 126 static void jme_msix_status(void *); 127 static void jme_txeof(struct jme_txdata *); 128 static void jme_rxeof(struct jme_rxdata *, int); 129 static void jme_rx_intr(struct jme_softc *, uint32_t); 130 static void jme_enable_intr(struct jme_softc *); 131 static void jme_disable_intr(struct jme_softc *); 132 static void jme_rx_restart(struct jme_softc *, uint32_t); 133 134 static int jme_msix_setup(device_t); 135 static void jme_msix_teardown(device_t, int); 136 static int jme_intr_setup(device_t); 137 static void jme_intr_teardown(device_t); 138 static void jme_msix_try_alloc(device_t); 139 static void jme_msix_free(device_t); 140 static int jme_intr_alloc(device_t); 141 static void jme_intr_free(device_t); 142 static int jme_dma_alloc(struct jme_softc *); 143 static void jme_dma_free(struct jme_softc *); 144 static int jme_init_rx_ring(struct jme_rxdata *); 145 static void jme_init_tx_ring(struct jme_txdata *); 146 static void jme_init_ssb(struct jme_softc *); 147 static int jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int); 148 static int jme_encap(struct jme_txdata *, struct mbuf **, int *); 149 static void jme_rxpkt(struct jme_rxdata *); 150 static int jme_rxring_dma_alloc(struct jme_rxdata *); 151 static int jme_rxbuf_dma_alloc(struct jme_rxdata *); 152 static int jme_rxbuf_dma_filter(void *, bus_addr_t); 153 154 static void jme_tick(void *); 155 static void jme_stop(struct jme_softc *); 156 static void jme_reset(struct jme_softc *); 157 static void jme_set_msinum(struct jme_softc *); 158 static void jme_set_vlan(struct jme_softc *); 159 static void jme_set_filter(struct jme_softc *); 160 static void jme_stop_tx(struct jme_softc *); 161 static void jme_stop_rx(struct jme_softc *); 162 static void jme_mac_config(struct jme_softc *); 163 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]); 164 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]); 165 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 166 #ifdef notyet 167 static void jme_setwol(struct jme_softc *); 168 static void jme_setlinkspeed(struct jme_softc *); 169 #endif 170 static void jme_set_tx_coal(struct jme_softc *); 171 static void jme_set_rx_coal(struct jme_softc *); 172 static void jme_enable_rss(struct jme_softc *); 173 static void jme_disable_rss(struct jme_softc *); 174 static void jme_serialize_skipmain(struct jme_softc *); 175 static void jme_deserialize_skipmain(struct jme_softc *); 176 177 static void jme_sysctl_node(struct jme_softc *); 178 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS); 179 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS); 180 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS); 181 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS); 182 #ifdef IFPOLL_ENABLE 183 static int jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 184 static int jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 185 #endif 186 187 /* 188 * Devices supported by this driver. 189 */ 190 static const struct jme_dev { 191 uint16_t jme_vendorid; 192 uint16_t jme_deviceid; 193 uint32_t jme_caps; 194 const char *jme_name; 195 } jme_devs[] = { 196 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250, 197 JME_CAP_JUMBO, 198 "JMicron Inc, JMC250 Gigabit Ethernet" }, 199 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260, 200 JME_CAP_FASTETH, 201 "JMicron Inc, JMC260 Fast Ethernet" }, 202 { 0, 0, 0, NULL } 203 }; 204 205 static device_method_t jme_methods[] = { 206 /* Device interface. */ 207 DEVMETHOD(device_probe, jme_probe), 208 DEVMETHOD(device_attach, jme_attach), 209 DEVMETHOD(device_detach, jme_detach), 210 DEVMETHOD(device_shutdown, jme_shutdown), 211 DEVMETHOD(device_suspend, jme_suspend), 212 DEVMETHOD(device_resume, jme_resume), 213 214 /* Bus interface. */ 215 DEVMETHOD(bus_print_child, bus_generic_print_child), 216 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 217 218 /* MII interface. */ 219 DEVMETHOD(miibus_readreg, jme_miibus_readreg), 220 DEVMETHOD(miibus_writereg, jme_miibus_writereg), 221 DEVMETHOD(miibus_statchg, jme_miibus_statchg), 222 223 { NULL, NULL } 224 }; 225 226 static driver_t jme_driver = { 227 "jme", 228 jme_methods, 229 sizeof(struct jme_softc) 230 }; 231 232 static devclass_t jme_devclass; 233 234 DECLARE_DUMMY_MODULE(if_jme); 235 MODULE_DEPEND(if_jme, miibus, 1, 1, 1); 236 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL); 237 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL); 238 239 static const struct { 240 uint32_t jme_coal; 241 uint32_t jme_comp; 242 uint32_t jme_empty; 243 } jme_rx_status[JME_NRXRING_MAX] = { 244 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP, 245 INTR_RXQ0_DESC_EMPTY }, 246 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP, 247 INTR_RXQ1_DESC_EMPTY }, 248 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP, 249 INTR_RXQ2_DESC_EMPTY }, 250 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP, 251 INTR_RXQ3_DESC_EMPTY } 252 }; 253 254 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF; 255 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF; 256 static int jme_rx_ring_count = 0; 257 static int jme_msi_enable = 1; 258 static int jme_msix_enable = 1; 259 260 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count); 261 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count); 262 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count); 263 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable); 264 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable); 265 266 static __inline void 267 jme_setup_rxdesc(struct jme_rxdesc *rxd) 268 { 269 struct jme_desc *desc; 270 271 desc = rxd->rx_desc; 272 desc->buflen = htole32(MCLBYTES); 273 desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr)); 274 desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr)); 275 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 276 } 277 278 /* 279 * Read a PHY register on the MII of the JMC250. 280 */ 281 static int 282 jme_miibus_readreg(device_t dev, int phy, int reg) 283 { 284 struct jme_softc *sc = device_get_softc(dev); 285 uint32_t val; 286 int i; 287 288 /* For FPGA version, PHY address 0 should be ignored. */ 289 if (sc->jme_caps & JME_CAP_FPGA) { 290 if (phy == 0) 291 return (0); 292 } else { 293 if (sc->jme_phyaddr != phy) 294 return (0); 295 } 296 297 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE | 298 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 299 300 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 301 DELAY(1); 302 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 303 break; 304 } 305 if (i == 0) { 306 device_printf(sc->jme_dev, "phy read timeout: " 307 "phy %d, reg %d\n", phy, reg); 308 return (0); 309 } 310 311 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); 312 } 313 314 /* 315 * Write a PHY register on the MII of the JMC250. 316 */ 317 static int 318 jme_miibus_writereg(device_t dev, int phy, int reg, int val) 319 { 320 struct jme_softc *sc = device_get_softc(dev); 321 int i; 322 323 /* For FPGA version, PHY address 0 should be ignored. */ 324 if (sc->jme_caps & JME_CAP_FPGA) { 325 if (phy == 0) 326 return (0); 327 } else { 328 if (sc->jme_phyaddr != phy) 329 return (0); 330 } 331 332 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE | 333 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 334 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 335 336 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 337 DELAY(1); 338 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 339 break; 340 } 341 if (i == 0) { 342 device_printf(sc->jme_dev, "phy write timeout: " 343 "phy %d, reg %d\n", phy, reg); 344 } 345 346 return (0); 347 } 348 349 /* 350 * Callback from MII layer when media changes. 351 */ 352 static void 353 jme_miibus_statchg(device_t dev) 354 { 355 struct jme_softc *sc = device_get_softc(dev); 356 struct ifnet *ifp = &sc->arpcom.ac_if; 357 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 358 struct mii_data *mii; 359 struct jme_txdesc *txd; 360 bus_addr_t paddr; 361 int i, r; 362 363 if (sc->jme_in_tick) 364 jme_serialize_skipmain(sc); 365 ASSERT_IFNET_SERIALIZED_ALL(ifp); 366 367 if ((ifp->if_flags & IFF_RUNNING) == 0) 368 goto done; 369 370 mii = device_get_softc(sc->jme_miibus); 371 372 sc->jme_has_link = FALSE; 373 if ((mii->mii_media_status & IFM_AVALID) != 0) { 374 switch (IFM_SUBTYPE(mii->mii_media_active)) { 375 case IFM_10_T: 376 case IFM_100_TX: 377 sc->jme_has_link = TRUE; 378 break; 379 case IFM_1000_T: 380 if (sc->jme_caps & JME_CAP_FASTETH) 381 break; 382 sc->jme_has_link = TRUE; 383 break; 384 default: 385 break; 386 } 387 } 388 389 /* 390 * Disabling Rx/Tx MACs have a side-effect of resetting 391 * JME_TXNDA/JME_RXNDA register to the first address of 392 * Tx/Rx descriptor address. So driver should reset its 393 * internal procucer/consumer pointer and reclaim any 394 * allocated resources. Note, just saving the value of 395 * JME_TXNDA and JME_RXNDA registers before stopping MAC 396 * and restoring JME_TXNDA/JME_RXNDA register is not 397 * sufficient to make sure correct MAC state because 398 * stopping MAC operation can take a while and hardware 399 * might have updated JME_TXNDA/JME_RXNDA registers 400 * during the stop operation. 401 */ 402 403 /* Disable interrupts */ 404 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 405 406 /* Stop driver */ 407 ifp->if_flags &= ~IFF_RUNNING; 408 ifq_clr_oactive(&ifp->if_snd); 409 ifp->if_timer = 0; 410 callout_stop(&sc->jme_tick_ch); 411 412 /* Stop receiver/transmitter. */ 413 jme_stop_rx(sc); 414 jme_stop_tx(sc); 415 416 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 417 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 418 419 jme_rxeof(rdata, -1); 420 if (rdata->jme_rxhead != NULL) 421 m_freem(rdata->jme_rxhead); 422 JME_RXCHAIN_RESET(rdata); 423 424 /* 425 * Reuse configured Rx descriptors and reset 426 * procuder/consumer index. 427 */ 428 rdata->jme_rx_cons = 0; 429 } 430 if (JME_ENABLE_HWRSS(sc)) 431 jme_enable_rss(sc); 432 else 433 jme_disable_rss(sc); 434 435 jme_txeof(tdata); 436 if (tdata->jme_tx_cnt != 0) { 437 /* Remove queued packets for transmit. */ 438 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 439 txd = &tdata->jme_txdesc[i]; 440 if (txd->tx_m != NULL) { 441 bus_dmamap_unload( tdata->jme_tx_tag, 442 txd->tx_dmamap); 443 m_freem(txd->tx_m); 444 txd->tx_m = NULL; 445 txd->tx_ndesc = 0; 446 ifp->if_oerrors++; 447 } 448 } 449 } 450 jme_init_tx_ring(tdata); 451 452 /* Initialize shadow status block. */ 453 jme_init_ssb(sc); 454 455 /* Program MAC with resolved speed/duplex/flow-control. */ 456 if (sc->jme_has_link) { 457 jme_mac_config(sc); 458 459 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 460 461 /* Set Tx ring address to the hardware. */ 462 paddr = tdata->jme_tx_ring_paddr; 463 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 464 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 465 466 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 467 CSR_WRITE_4(sc, JME_RXCSR, 468 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 469 470 /* Set Rx ring address to the hardware. */ 471 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr; 472 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 473 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 474 } 475 476 /* Restart receiver/transmitter. */ 477 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 478 RXCSR_RXQ_START); 479 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB); 480 } 481 482 ifp->if_flags |= IFF_RUNNING; 483 ifq_clr_oactive(&ifp->if_snd); 484 callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc, 485 JME_TICK_CPUID); 486 487 #ifdef IFPOLL_ENABLE 488 if (!(ifp->if_flags & IFF_NPOLLING)) 489 #endif 490 /* Reenable interrupts. */ 491 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 492 493 done: 494 if (sc->jme_in_tick) 495 jme_deserialize_skipmain(sc); 496 } 497 498 /* 499 * Get the current interface media status. 500 */ 501 static void 502 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 503 { 504 struct jme_softc *sc = ifp->if_softc; 505 struct mii_data *mii = device_get_softc(sc->jme_miibus); 506 507 ASSERT_IFNET_SERIALIZED_ALL(ifp); 508 509 mii_pollstat(mii); 510 ifmr->ifm_status = mii->mii_media_status; 511 ifmr->ifm_active = mii->mii_media_active; 512 } 513 514 /* 515 * Set hardware to newly-selected media. 516 */ 517 static int 518 jme_mediachange(struct ifnet *ifp) 519 { 520 struct jme_softc *sc = ifp->if_softc; 521 struct mii_data *mii = device_get_softc(sc->jme_miibus); 522 int error; 523 524 ASSERT_IFNET_SERIALIZED_ALL(ifp); 525 526 if (mii->mii_instance != 0) { 527 struct mii_softc *miisc; 528 529 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 530 mii_phy_reset(miisc); 531 } 532 error = mii_mediachg(mii); 533 534 return (error); 535 } 536 537 static int 538 jme_probe(device_t dev) 539 { 540 const struct jme_dev *sp; 541 uint16_t vid, did; 542 543 vid = pci_get_vendor(dev); 544 did = pci_get_device(dev); 545 for (sp = jme_devs; sp->jme_name != NULL; ++sp) { 546 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) { 547 struct jme_softc *sc = device_get_softc(dev); 548 549 sc->jme_caps = sp->jme_caps; 550 device_set_desc(dev, sp->jme_name); 551 return (0); 552 } 553 } 554 return (ENXIO); 555 } 556 557 static int 558 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) 559 { 560 uint32_t reg; 561 int i; 562 563 *val = 0; 564 for (i = JME_TIMEOUT; i > 0; i--) { 565 reg = CSR_READ_4(sc, JME_SMBCSR); 566 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) 567 break; 568 DELAY(1); 569 } 570 571 if (i == 0) { 572 device_printf(sc->jme_dev, "EEPROM idle timeout!\n"); 573 return (ETIMEDOUT); 574 } 575 576 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; 577 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); 578 for (i = JME_TIMEOUT; i > 0; i--) { 579 DELAY(1); 580 reg = CSR_READ_4(sc, JME_SMBINTF); 581 if ((reg & SMBINTF_CMD_TRIGGER) == 0) 582 break; 583 } 584 585 if (i == 0) { 586 device_printf(sc->jme_dev, "EEPROM read timeout!\n"); 587 return (ETIMEDOUT); 588 } 589 590 reg = CSR_READ_4(sc, JME_SMBINTF); 591 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; 592 593 return (0); 594 } 595 596 static int 597 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 598 { 599 uint8_t fup, reg, val; 600 uint32_t offset; 601 int match; 602 603 offset = 0; 604 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 605 fup != JME_EEPROM_SIG0) 606 return (ENOENT); 607 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 608 fup != JME_EEPROM_SIG1) 609 return (ENOENT); 610 match = 0; 611 do { 612 if (jme_eeprom_read_byte(sc, offset, &fup) != 0) 613 break; 614 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) == 615 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) { 616 if (jme_eeprom_read_byte(sc, offset + 1, ®) != 0) 617 break; 618 if (reg >= JME_PAR0 && 619 reg < JME_PAR0 + ETHER_ADDR_LEN) { 620 if (jme_eeprom_read_byte(sc, offset + 2, 621 &val) != 0) 622 break; 623 eaddr[reg - JME_PAR0] = val; 624 match++; 625 } 626 } 627 /* Check for the end of EEPROM descriptor. */ 628 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END) 629 break; 630 /* Try next eeprom descriptor. */ 631 offset += JME_EEPROM_DESC_BYTES; 632 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END); 633 634 if (match == ETHER_ADDR_LEN) 635 return (0); 636 637 return (ENOENT); 638 } 639 640 static void 641 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 642 { 643 uint32_t par0, par1; 644 645 /* Read station address. */ 646 par0 = CSR_READ_4(sc, JME_PAR0); 647 par1 = CSR_READ_4(sc, JME_PAR1); 648 par1 &= 0xFFFF; 649 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) { 650 device_printf(sc->jme_dev, 651 "generating fake ethernet address.\n"); 652 par0 = karc4random(); 653 /* Set OUI to JMicron. */ 654 eaddr[0] = 0x00; 655 eaddr[1] = 0x1B; 656 eaddr[2] = 0x8C; 657 eaddr[3] = (par0 >> 16) & 0xff; 658 eaddr[4] = (par0 >> 8) & 0xff; 659 eaddr[5] = par0 & 0xff; 660 } else { 661 eaddr[0] = (par0 >> 0) & 0xFF; 662 eaddr[1] = (par0 >> 8) & 0xFF; 663 eaddr[2] = (par0 >> 16) & 0xFF; 664 eaddr[3] = (par0 >> 24) & 0xFF; 665 eaddr[4] = (par1 >> 0) & 0xFF; 666 eaddr[5] = (par1 >> 8) & 0xFF; 667 } 668 } 669 670 static int 671 jme_attach(device_t dev) 672 { 673 struct jme_softc *sc = device_get_softc(dev); 674 struct ifnet *ifp = &sc->arpcom.ac_if; 675 uint32_t reg; 676 uint16_t did; 677 uint8_t pcie_ptr, rev; 678 int error = 0, i, j, rx_desc_cnt, coal_max; 679 uint8_t eaddr[ETHER_ADDR_LEN]; 680 #ifdef IFPOLL_ENABLE 681 int offset, offset_def; 682 #endif 683 684 /* 685 * Initialize serializers 686 */ 687 lwkt_serialize_init(&sc->jme_serialize); 688 lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize); 689 for (i = 0; i < JME_NRXRING_MAX; ++i) { 690 lwkt_serialize_init( 691 &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize); 692 } 693 694 /* 695 * Get # of RX ring descriptors 696 */ 697 rx_desc_cnt = device_getenv_int(dev, "rx_desc_count", 698 jme_rx_desc_count); 699 rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN); 700 if (rx_desc_cnt > JME_NDESC_MAX) 701 rx_desc_cnt = JME_NDESC_MAX; 702 703 /* 704 * Get # of TX ring descriptors 705 */ 706 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = 707 device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count); 708 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = 709 roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN); 710 if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX) 711 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX; 712 713 /* 714 * Get # of RX rings 715 */ 716 sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count", 717 jme_rx_ring_count); 718 sc->jme_cdata.jme_rx_ring_cnt = 719 if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX); 720 721 /* 722 * Initialize serializer array 723 */ 724 i = 0; 725 sc->jme_serialize_arr[i++] = &sc->jme_serialize; 726 727 KKASSERT(i == JME_TX_SERIALIZE); 728 sc->jme_serialize_arr[i++] = 729 &sc->jme_cdata.jme_tx_data.jme_tx_serialize; 730 731 KKASSERT(i == JME_RX_SERIALIZE); 732 for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) { 733 sc->jme_serialize_arr[i++] = 734 &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize; 735 } 736 KKASSERT(i <= JME_NSERIALIZE); 737 sc->jme_serialize_cnt = i; 738 739 /* 740 * Setup TX ring specific data 741 */ 742 sc->jme_cdata.jme_tx_data.jme_sc = sc; 743 744 /* 745 * Setup RX rings specific data 746 */ 747 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 748 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 749 750 rdata->jme_sc = sc; 751 rdata->jme_rx_coal = jme_rx_status[i].jme_coal; 752 rdata->jme_rx_comp = jme_rx_status[i].jme_comp; 753 rdata->jme_rx_empty = jme_rx_status[i].jme_empty; 754 rdata->jme_rx_idx = i; 755 rdata->jme_rx_desc_cnt = rx_desc_cnt; 756 } 757 758 sc->jme_dev = dev; 759 sc->jme_lowaddr = BUS_SPACE_MAXADDR; 760 761 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 762 763 callout_init(&sc->jme_tick_ch); 764 765 #ifndef BURN_BRIDGES 766 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 767 uint32_t irq, mem; 768 769 irq = pci_read_config(dev, PCIR_INTLINE, 4); 770 mem = pci_read_config(dev, JME_PCIR_BAR, 4); 771 772 device_printf(dev, "chip is in D%d power mode " 773 "-- setting to D0\n", pci_get_powerstate(dev)); 774 775 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 776 777 pci_write_config(dev, PCIR_INTLINE, irq, 4); 778 pci_write_config(dev, JME_PCIR_BAR, mem, 4); 779 } 780 #endif /* !BURN_BRIDGE */ 781 782 /* Enable bus mastering */ 783 pci_enable_busmaster(dev); 784 785 /* 786 * Allocate IO memory 787 * 788 * JMC250 supports both memory mapped and I/O register space 789 * access. Because I/O register access should use different 790 * BARs to access registers it's waste of time to use I/O 791 * register spce access. JMC250 uses 16K to map entire memory 792 * space. 793 */ 794 sc->jme_mem_rid = JME_PCIR_BAR; 795 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 796 &sc->jme_mem_rid, RF_ACTIVE); 797 if (sc->jme_mem_res == NULL) { 798 device_printf(dev, "can't allocate IO memory\n"); 799 return ENXIO; 800 } 801 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res); 802 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res); 803 804 /* 805 * Allocate IRQ 806 */ 807 error = jme_intr_alloc(dev); 808 if (error) 809 goto fail; 810 811 /* 812 * Extract revisions 813 */ 814 reg = CSR_READ_4(sc, JME_CHIPMODE); 815 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != 816 CHIPMODE_NOT_FPGA) { 817 sc->jme_caps |= JME_CAP_FPGA; 818 if (bootverbose) { 819 device_printf(dev, "FPGA revision: 0x%04x\n", 820 (reg & CHIPMODE_FPGA_REV_MASK) >> 821 CHIPMODE_FPGA_REV_SHIFT); 822 } 823 } 824 825 /* NOTE: FM revision is put in the upper 4 bits */ 826 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4; 827 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT; 828 if (bootverbose) 829 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev); 830 831 did = pci_get_device(dev); 832 switch (did) { 833 case PCI_PRODUCT_JMICRON_JMC250: 834 if (rev == JME_REV1_A2) 835 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX; 836 break; 837 838 case PCI_PRODUCT_JMICRON_JMC260: 839 if (rev == JME_REV2) 840 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT; 841 break; 842 843 default: 844 panic("unknown device id 0x%04x", did); 845 } 846 if (rev >= JME_REV2) { 847 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC; 848 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 | 849 GHC_TXMAC_CLKSRC_1000; 850 } 851 852 /* Reset the ethernet controller. */ 853 jme_reset(sc); 854 855 /* Map MSI/MSI-X vectors */ 856 jme_set_msinum(sc); 857 858 /* Get station address. */ 859 reg = CSR_READ_4(sc, JME_SMBCSR); 860 if (reg & SMBCSR_EEPROM_PRESENT) 861 error = jme_eeprom_macaddr(sc, eaddr); 862 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) { 863 if (error != 0 && (bootverbose)) { 864 device_printf(dev, "ethernet hardware address " 865 "not found in EEPROM.\n"); 866 } 867 jme_reg_macaddr(sc, eaddr); 868 } 869 870 /* 871 * Save PHY address. 872 * Integrated JR0211 has fixed PHY address whereas FPGA version 873 * requires PHY probing to get correct PHY address. 874 */ 875 if ((sc->jme_caps & JME_CAP_FPGA) == 0) { 876 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) & 877 GPREG0_PHY_ADDR_MASK; 878 if (bootverbose) { 879 device_printf(dev, "PHY is at address %d.\n", 880 sc->jme_phyaddr); 881 } 882 } else { 883 sc->jme_phyaddr = 0; 884 } 885 886 /* Set max allowable DMA size. */ 887 pcie_ptr = pci_get_pciecap_ptr(dev); 888 if (pcie_ptr != 0) { 889 uint16_t ctrl; 890 891 sc->jme_caps |= JME_CAP_PCIE; 892 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2); 893 if (bootverbose) { 894 device_printf(dev, "Read request size : %d bytes.\n", 895 128 << ((ctrl >> 12) & 0x07)); 896 device_printf(dev, "TLP payload size : %d bytes.\n", 897 128 << ((ctrl >> 5) & 0x07)); 898 } 899 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) { 900 case PCIEM_DEVCTL_MAX_READRQ_128: 901 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128; 902 break; 903 case PCIEM_DEVCTL_MAX_READRQ_256: 904 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256; 905 break; 906 default: 907 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 908 break; 909 } 910 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 911 } else { 912 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 913 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 914 } 915 916 #ifdef notyet 917 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 918 sc->jme_caps |= JME_CAP_PMCAP; 919 #endif 920 921 #ifdef IFPOLL_ENABLE 922 /* 923 * NPOLLING RX CPU offset 924 */ 925 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) { 926 offset = 0; 927 } else { 928 offset_def = (sc->jme_cdata.jme_rx_ring_cnt * 929 device_get_unit(dev)) % ncpus2; 930 offset = device_getenv_int(dev, "npoll.rxoff", offset_def); 931 if (offset >= ncpus2 || 932 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) { 933 device_printf(dev, "invalid npoll.rxoff %d, use %d\n", 934 offset, offset_def); 935 offset = offset_def; 936 } 937 } 938 sc->jme_npoll_rxoff = offset; 939 940 /* 941 * NPOLLING TX CPU offset 942 */ 943 offset_def = sc->jme_npoll_rxoff; 944 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 945 if (offset >= ncpus2) { 946 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 947 offset, offset_def); 948 offset = offset_def; 949 } 950 sc->jme_npoll_txoff = offset; 951 #endif 952 953 /* 954 * Set default coalesce valves 955 */ 956 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 957 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 958 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 959 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 960 961 /* 962 * Adjust coalesce valves, in case that the number of TX/RX 963 * descs are set to small values by users. 964 * 965 * NOTE: coal_max will not be zero, since number of descs 966 * must aligned by JME_NDESC_ALIGN (16 currently) 967 */ 968 coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2; 969 if (coal_max < sc->jme_tx_coal_pkt) 970 sc->jme_tx_coal_pkt = coal_max; 971 972 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2; 973 if (coal_max < sc->jme_rx_coal_pkt) 974 sc->jme_rx_coal_pkt = coal_max; 975 976 sc->jme_cdata.jme_tx_data.jme_tx_wreg = 16; 977 978 /* 979 * Create sysctl tree 980 */ 981 jme_sysctl_node(sc); 982 983 /* Allocate DMA stuffs */ 984 error = jme_dma_alloc(sc); 985 if (error) 986 goto fail; 987 988 ifp->if_softc = sc; 989 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 990 ifp->if_init = jme_init; 991 ifp->if_ioctl = jme_ioctl; 992 ifp->if_start = jme_start; 993 #ifdef IFPOLL_ENABLE 994 ifp->if_npoll = jme_npoll; 995 #endif 996 ifp->if_watchdog = jme_watchdog; 997 ifp->if_serialize = jme_serialize; 998 ifp->if_deserialize = jme_deserialize; 999 ifp->if_tryserialize = jme_tryserialize; 1000 #ifdef INVARIANTS 1001 ifp->if_serialize_assert = jme_serialize_assert; 1002 #endif 1003 ifq_set_maxlen(&ifp->if_snd, 1004 sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD); 1005 ifq_set_ready(&ifp->if_snd); 1006 1007 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */ 1008 ifp->if_capabilities = IFCAP_HWCSUM | 1009 IFCAP_TSO | 1010 IFCAP_VLAN_MTU | 1011 IFCAP_VLAN_HWTAGGING; 1012 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN) 1013 ifp->if_capabilities |= IFCAP_RSS; 1014 ifp->if_capenable = ifp->if_capabilities; 1015 1016 /* 1017 * Disable TXCSUM by default to improve bulk data 1018 * transmit performance (+20Mbps improvement). 1019 */ 1020 ifp->if_capenable &= ~IFCAP_TXCSUM; 1021 1022 if (ifp->if_capenable & IFCAP_TXCSUM) 1023 ifp->if_hwassist |= JME_CSUM_FEATURES; 1024 ifp->if_hwassist |= CSUM_TSO; 1025 1026 /* Set up MII bus. */ 1027 error = mii_phy_probe(dev, &sc->jme_miibus, 1028 jme_mediachange, jme_mediastatus); 1029 if (error) { 1030 device_printf(dev, "no PHY found!\n"); 1031 goto fail; 1032 } 1033 1034 /* 1035 * Save PHYADDR for FPGA mode PHY. 1036 */ 1037 if (sc->jme_caps & JME_CAP_FPGA) { 1038 struct mii_data *mii = device_get_softc(sc->jme_miibus); 1039 1040 if (mii->mii_instance != 0) { 1041 struct mii_softc *miisc; 1042 1043 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { 1044 if (miisc->mii_phy != 0) { 1045 sc->jme_phyaddr = miisc->mii_phy; 1046 break; 1047 } 1048 } 1049 if (sc->jme_phyaddr != 0) { 1050 device_printf(sc->jme_dev, 1051 "FPGA PHY is at %d\n", sc->jme_phyaddr); 1052 /* vendor magic. */ 1053 jme_miibus_writereg(dev, sc->jme_phyaddr, 1054 JMPHY_CONF, JMPHY_CONF_DEFFIFO); 1055 1056 /* XXX should we clear JME_WA_EXTFIFO */ 1057 } 1058 } 1059 } 1060 1061 ether_ifattach(ifp, eaddr, NULL); 1062 1063 /* Tell the upper layer(s) we support long frames. */ 1064 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1065 1066 error = jme_intr_setup(dev); 1067 if (error) { 1068 ether_ifdetach(ifp); 1069 goto fail; 1070 } 1071 ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid); 1072 1073 return 0; 1074 fail: 1075 jme_detach(dev); 1076 return (error); 1077 } 1078 1079 static int 1080 jme_detach(device_t dev) 1081 { 1082 struct jme_softc *sc = device_get_softc(dev); 1083 1084 if (device_is_attached(dev)) { 1085 struct ifnet *ifp = &sc->arpcom.ac_if; 1086 1087 ifnet_serialize_all(ifp); 1088 jme_stop(sc); 1089 jme_intr_teardown(dev); 1090 ifnet_deserialize_all(ifp); 1091 1092 ether_ifdetach(ifp); 1093 } 1094 1095 if (sc->jme_sysctl_tree != NULL) 1096 sysctl_ctx_free(&sc->jme_sysctl_ctx); 1097 1098 if (sc->jme_miibus != NULL) 1099 device_delete_child(dev, sc->jme_miibus); 1100 bus_generic_detach(dev); 1101 1102 jme_intr_free(dev); 1103 1104 if (sc->jme_mem_res != NULL) { 1105 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid, 1106 sc->jme_mem_res); 1107 } 1108 1109 jme_dma_free(sc); 1110 1111 return (0); 1112 } 1113 1114 static void 1115 jme_sysctl_node(struct jme_softc *sc) 1116 { 1117 #ifdef JME_RSS_DEBUG 1118 int r; 1119 #endif 1120 1121 sysctl_ctx_init(&sc->jme_sysctl_ctx); 1122 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx, 1123 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 1124 device_get_nameunit(sc->jme_dev), 1125 CTLFLAG_RD, 0, ""); 1126 if (sc->jme_sysctl_tree == NULL) { 1127 device_printf(sc->jme_dev, "can't add sysctl node\n"); 1128 return; 1129 } 1130 1131 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 1132 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1133 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, 1134 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout"); 1135 1136 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 1137 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1138 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, 1139 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet"); 1140 1141 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 1142 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1143 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, 1144 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout"); 1145 1146 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 1147 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1148 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, 1149 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet"); 1150 1151 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 1152 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1153 "rx_desc_count", CTLFLAG_RD, 1154 &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt, 1155 0, "RX desc count"); 1156 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 1157 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1158 "tx_desc_count", CTLFLAG_RD, 1159 &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, 1160 0, "TX desc count"); 1161 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 1162 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1163 "rx_ring_count", CTLFLAG_RD, 1164 &sc->jme_cdata.jme_rx_ring_cnt, 1165 0, "RX ring count"); 1166 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 1167 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1168 "tx_wreg", CTLFLAG_RW, 1169 &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0, 1170 "# of segments before writing to hardware register"); 1171 1172 #ifdef JME_RSS_DEBUG 1173 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 1174 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1175 "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug, 1176 0, "RSS debug level"); 1177 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1178 char rx_ring_desc[32]; 1179 1180 ksnprintf(rx_ring_desc, sizeof(rx_ring_desc), 1181 "rx_ring%d_pkt", r); 1182 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx, 1183 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1184 rx_ring_desc, CTLFLAG_RW, 1185 &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets"); 1186 1187 ksnprintf(rx_ring_desc, sizeof(rx_ring_desc), 1188 "rx_ring%d_emp", r); 1189 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx, 1190 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1191 rx_ring_desc, CTLFLAG_RW, 1192 &sc->jme_cdata.jme_rx_data[r].jme_rx_emp, 1193 "# of time RX ring empty"); 1194 } 1195 #endif 1196 1197 #ifdef IFPOLL_ENABLE 1198 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 1199 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1200 "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 1201 jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset"); 1202 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 1203 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1204 "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 1205 jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset"); 1206 #endif 1207 } 1208 1209 static int 1210 jme_dma_alloc(struct jme_softc *sc) 1211 { 1212 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1213 struct jme_txdesc *txd; 1214 bus_dmamem_t dmem; 1215 int error, i, asize; 1216 1217 asize = __VM_CACHELINE_ALIGN( 1218 tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc)); 1219 tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF, 1220 M_WAITOK | M_ZERO); 1221 1222 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1223 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 1224 1225 asize = __VM_CACHELINE_ALIGN( 1226 rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc)); 1227 rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF, 1228 M_WAITOK | M_ZERO); 1229 } 1230 1231 /* Create parent ring tag. */ 1232 error = bus_dma_tag_create(NULL,/* parent */ 1233 1, JME_RING_BOUNDARY, /* algnmnt, boundary */ 1234 sc->jme_lowaddr, /* lowaddr */ 1235 BUS_SPACE_MAXADDR, /* highaddr */ 1236 NULL, NULL, /* filter, filterarg */ 1237 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1238 0, /* nsegments */ 1239 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1240 0, /* flags */ 1241 &sc->jme_cdata.jme_ring_tag); 1242 if (error) { 1243 device_printf(sc->jme_dev, 1244 "could not create parent ring DMA tag.\n"); 1245 return error; 1246 } 1247 1248 /* 1249 * Create DMA stuffs for TX ring 1250 */ 1251 asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN); 1252 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag, 1253 JME_TX_RING_ALIGN, 0, 1254 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1255 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1256 if (error) { 1257 device_printf(sc->jme_dev, "could not allocate Tx ring.\n"); 1258 return error; 1259 } 1260 tdata->jme_tx_ring_tag = dmem.dmem_tag; 1261 tdata->jme_tx_ring_map = dmem.dmem_map; 1262 tdata->jme_tx_ring = dmem.dmem_addr; 1263 tdata->jme_tx_ring_paddr = dmem.dmem_busaddr; 1264 1265 /* 1266 * Create DMA stuffs for RX rings 1267 */ 1268 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1269 error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]); 1270 if (error) 1271 return error; 1272 } 1273 1274 /* Create parent buffer tag. */ 1275 error = bus_dma_tag_create(NULL,/* parent */ 1276 1, 0, /* algnmnt, boundary */ 1277 sc->jme_lowaddr, /* lowaddr */ 1278 BUS_SPACE_MAXADDR, /* highaddr */ 1279 NULL, NULL, /* filter, filterarg */ 1280 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1281 0, /* nsegments */ 1282 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1283 0, /* flags */ 1284 &sc->jme_cdata.jme_buffer_tag); 1285 if (error) { 1286 device_printf(sc->jme_dev, 1287 "could not create parent buffer DMA tag.\n"); 1288 return error; 1289 } 1290 1291 /* 1292 * Create DMA stuffs for shadow status block 1293 */ 1294 asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN); 1295 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag, 1296 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1297 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1298 if (error) { 1299 device_printf(sc->jme_dev, 1300 "could not create shadow status block.\n"); 1301 return error; 1302 } 1303 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag; 1304 sc->jme_cdata.jme_ssb_map = dmem.dmem_map; 1305 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr; 1306 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr; 1307 1308 /* 1309 * Create DMA stuffs for TX buffers 1310 */ 1311 1312 /* Create tag for Tx buffers. */ 1313 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1314 1, 0, /* algnmnt, boundary */ 1315 BUS_SPACE_MAXADDR, /* lowaddr */ 1316 BUS_SPACE_MAXADDR, /* highaddr */ 1317 NULL, NULL, /* filter, filterarg */ 1318 JME_TSO_MAXSIZE, /* maxsize */ 1319 JME_MAXTXSEGS, /* nsegments */ 1320 JME_MAXSEGSIZE, /* maxsegsize */ 1321 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */ 1322 &tdata->jme_tx_tag); 1323 if (error != 0) { 1324 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); 1325 return error; 1326 } 1327 1328 /* Create DMA maps for Tx buffers. */ 1329 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 1330 txd = &tdata->jme_txdesc[i]; 1331 error = bus_dmamap_create(tdata->jme_tx_tag, 1332 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1333 &txd->tx_dmamap); 1334 if (error) { 1335 int j; 1336 1337 device_printf(sc->jme_dev, 1338 "could not create %dth Tx dmamap.\n", i); 1339 1340 for (j = 0; j < i; ++j) { 1341 txd = &tdata->jme_txdesc[j]; 1342 bus_dmamap_destroy(tdata->jme_tx_tag, 1343 txd->tx_dmamap); 1344 } 1345 bus_dma_tag_destroy(tdata->jme_tx_tag); 1346 tdata->jme_tx_tag = NULL; 1347 return error; 1348 } 1349 } 1350 1351 /* 1352 * Create DMA stuffs for RX buffers 1353 */ 1354 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1355 error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]); 1356 if (error) 1357 return error; 1358 } 1359 return 0; 1360 } 1361 1362 static void 1363 jme_dma_free(struct jme_softc *sc) 1364 { 1365 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1366 struct jme_txdesc *txd; 1367 struct jme_rxdesc *rxd; 1368 struct jme_rxdata *rdata; 1369 int i, r; 1370 1371 /* Tx ring */ 1372 if (tdata->jme_tx_ring_tag != NULL) { 1373 bus_dmamap_unload(tdata->jme_tx_ring_tag, 1374 tdata->jme_tx_ring_map); 1375 bus_dmamem_free(tdata->jme_tx_ring_tag, 1376 tdata->jme_tx_ring, tdata->jme_tx_ring_map); 1377 bus_dma_tag_destroy(tdata->jme_tx_ring_tag); 1378 tdata->jme_tx_ring_tag = NULL; 1379 } 1380 1381 /* Rx ring */ 1382 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1383 rdata = &sc->jme_cdata.jme_rx_data[r]; 1384 if (rdata->jme_rx_ring_tag != NULL) { 1385 bus_dmamap_unload(rdata->jme_rx_ring_tag, 1386 rdata->jme_rx_ring_map); 1387 bus_dmamem_free(rdata->jme_rx_ring_tag, 1388 rdata->jme_rx_ring, 1389 rdata->jme_rx_ring_map); 1390 bus_dma_tag_destroy(rdata->jme_rx_ring_tag); 1391 rdata->jme_rx_ring_tag = NULL; 1392 } 1393 } 1394 1395 /* Tx buffers */ 1396 if (tdata->jme_tx_tag != NULL) { 1397 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 1398 txd = &tdata->jme_txdesc[i]; 1399 bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap); 1400 } 1401 bus_dma_tag_destroy(tdata->jme_tx_tag); 1402 tdata->jme_tx_tag = NULL; 1403 } 1404 1405 /* Rx buffers */ 1406 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1407 rdata = &sc->jme_cdata.jme_rx_data[r]; 1408 if (rdata->jme_rx_tag != NULL) { 1409 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 1410 rxd = &rdata->jme_rxdesc[i]; 1411 bus_dmamap_destroy(rdata->jme_rx_tag, 1412 rxd->rx_dmamap); 1413 } 1414 bus_dmamap_destroy(rdata->jme_rx_tag, 1415 rdata->jme_rx_sparemap); 1416 bus_dma_tag_destroy(rdata->jme_rx_tag); 1417 rdata->jme_rx_tag = NULL; 1418 } 1419 } 1420 1421 /* Shadow status block. */ 1422 if (sc->jme_cdata.jme_ssb_tag != NULL) { 1423 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag, 1424 sc->jme_cdata.jme_ssb_map); 1425 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag, 1426 sc->jme_cdata.jme_ssb_block, 1427 sc->jme_cdata.jme_ssb_map); 1428 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); 1429 sc->jme_cdata.jme_ssb_tag = NULL; 1430 } 1431 1432 if (sc->jme_cdata.jme_buffer_tag != NULL) { 1433 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag); 1434 sc->jme_cdata.jme_buffer_tag = NULL; 1435 } 1436 if (sc->jme_cdata.jme_ring_tag != NULL) { 1437 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag); 1438 sc->jme_cdata.jme_ring_tag = NULL; 1439 } 1440 1441 if (tdata->jme_txdesc != NULL) { 1442 kfree(tdata->jme_txdesc, M_DEVBUF); 1443 tdata->jme_txdesc = NULL; 1444 } 1445 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 1446 rdata = &sc->jme_cdata.jme_rx_data[r]; 1447 if (rdata->jme_rxdesc != NULL) { 1448 kfree(rdata->jme_rxdesc, M_DEVBUF); 1449 rdata->jme_rxdesc = NULL; 1450 } 1451 } 1452 } 1453 1454 /* 1455 * Make sure the interface is stopped at reboot time. 1456 */ 1457 static int 1458 jme_shutdown(device_t dev) 1459 { 1460 return jme_suspend(dev); 1461 } 1462 1463 #ifdef notyet 1464 /* 1465 * Unlike other ethernet controllers, JMC250 requires 1466 * explicit resetting link speed to 10/100Mbps as gigabit 1467 * link will cunsume more power than 375mA. 1468 * Note, we reset the link speed to 10/100Mbps with 1469 * auto-negotiation but we don't know whether that operation 1470 * would succeed or not as we have no control after powering 1471 * off. If the renegotiation fail WOL may not work. Running 1472 * at 1Gbps draws more power than 375mA at 3.3V which is 1473 * specified in PCI specification and that would result in 1474 * complete shutdowning power to ethernet controller. 1475 * 1476 * TODO 1477 * Save current negotiated media speed/duplex/flow-control 1478 * to softc and restore the same link again after resuming. 1479 * PHY handling such as power down/resetting to 100Mbps 1480 * may be better handled in suspend method in phy driver. 1481 */ 1482 static void 1483 jme_setlinkspeed(struct jme_softc *sc) 1484 { 1485 struct mii_data *mii; 1486 int aneg, i; 1487 1488 JME_LOCK_ASSERT(sc); 1489 1490 mii = device_get_softc(sc->jme_miibus); 1491 mii_pollstat(mii); 1492 aneg = 0; 1493 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1494 switch IFM_SUBTYPE(mii->mii_media_active) { 1495 case IFM_10_T: 1496 case IFM_100_TX: 1497 return; 1498 case IFM_1000_T: 1499 aneg++; 1500 default: 1501 break; 1502 } 1503 } 1504 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0); 1505 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR, 1506 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1507 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, 1508 BMCR_AUTOEN | BMCR_STARTNEG); 1509 DELAY(1000); 1510 if (aneg != 0) { 1511 /* Poll link state until jme(4) get a 10/100 link. */ 1512 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1513 mii_pollstat(mii); 1514 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1515 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1516 case IFM_10_T: 1517 case IFM_100_TX: 1518 jme_mac_config(sc); 1519 return; 1520 default: 1521 break; 1522 } 1523 } 1524 JME_UNLOCK(sc); 1525 pause("jmelnk", hz); 1526 JME_LOCK(sc); 1527 } 1528 if (i == MII_ANEGTICKS_GIGE) 1529 device_printf(sc->jme_dev, "establishing link failed, " 1530 "WOL may not work!"); 1531 } 1532 /* 1533 * No link, force MAC to have 100Mbps, full-duplex link. 1534 * This is the last resort and may/may not work. 1535 */ 1536 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1537 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1538 jme_mac_config(sc); 1539 } 1540 1541 static void 1542 jme_setwol(struct jme_softc *sc) 1543 { 1544 struct ifnet *ifp = &sc->arpcom.ac_if; 1545 uint32_t gpr, pmcs; 1546 uint16_t pmstat; 1547 int pmc; 1548 1549 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1550 /* No PME capability, PHY power down. */ 1551 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1552 MII_BMCR, BMCR_PDOWN); 1553 return; 1554 } 1555 1556 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1557 pmcs = CSR_READ_4(sc, JME_PMCS); 1558 pmcs &= ~PMCS_WOL_ENB_MASK; 1559 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1560 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1561 /* Enable PME message. */ 1562 gpr |= GPREG0_PME_ENB; 1563 /* For gigabit controllers, reset link speed to 10/100. */ 1564 if ((sc->jme_caps & JME_CAP_FASTETH) == 0) 1565 jme_setlinkspeed(sc); 1566 } 1567 1568 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1569 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1570 1571 /* Request PME. */ 1572 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); 1573 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1574 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1575 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1576 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1577 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1578 /* No WOL, PHY power down. */ 1579 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1580 MII_BMCR, BMCR_PDOWN); 1581 } 1582 } 1583 #endif 1584 1585 static int 1586 jme_suspend(device_t dev) 1587 { 1588 struct jme_softc *sc = device_get_softc(dev); 1589 struct ifnet *ifp = &sc->arpcom.ac_if; 1590 1591 ifnet_serialize_all(ifp); 1592 jme_stop(sc); 1593 #ifdef notyet 1594 jme_setwol(sc); 1595 #endif 1596 ifnet_deserialize_all(ifp); 1597 1598 return (0); 1599 } 1600 1601 static int 1602 jme_resume(device_t dev) 1603 { 1604 struct jme_softc *sc = device_get_softc(dev); 1605 struct ifnet *ifp = &sc->arpcom.ac_if; 1606 #ifdef notyet 1607 int pmc; 1608 #endif 1609 1610 ifnet_serialize_all(ifp); 1611 1612 #ifdef notyet 1613 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1614 uint16_t pmstat; 1615 1616 pmstat = pci_read_config(sc->jme_dev, 1617 pmc + PCIR_POWER_STATUS, 2); 1618 /* Disable PME clear PME status. */ 1619 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1620 pci_write_config(sc->jme_dev, 1621 pmc + PCIR_POWER_STATUS, pmstat, 2); 1622 } 1623 #endif 1624 1625 if (ifp->if_flags & IFF_UP) 1626 jme_init(sc); 1627 1628 ifnet_deserialize_all(ifp); 1629 1630 return (0); 1631 } 1632 1633 static __inline int 1634 jme_tso_pullup(struct mbuf **mp) 1635 { 1636 int hoff, iphlen, thoff; 1637 struct mbuf *m; 1638 1639 m = *mp; 1640 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 1641 1642 iphlen = m->m_pkthdr.csum_iphlen; 1643 thoff = m->m_pkthdr.csum_thlen; 1644 hoff = m->m_pkthdr.csum_lhlen; 1645 1646 KASSERT(iphlen > 0, ("invalid ip hlen")); 1647 KASSERT(thoff > 0, ("invalid tcp hlen")); 1648 KASSERT(hoff > 0, ("invalid ether hlen")); 1649 1650 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 1651 m = m_pullup(m, hoff + iphlen + thoff); 1652 if (m == NULL) { 1653 *mp = NULL; 1654 return ENOBUFS; 1655 } 1656 *mp = m; 1657 } 1658 return 0; 1659 } 1660 1661 static int 1662 jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used) 1663 { 1664 struct jme_txdesc *txd; 1665 struct jme_desc *desc; 1666 struct mbuf *m; 1667 bus_dma_segment_t txsegs[JME_MAXTXSEGS]; 1668 int maxsegs, nsegs; 1669 int error, i, prod, symbol_desc; 1670 uint32_t cflags, flag64, mss; 1671 1672 M_ASSERTPKTHDR((*m_head)); 1673 1674 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) { 1675 /* XXX Is this necessary? */ 1676 error = jme_tso_pullup(m_head); 1677 if (error) 1678 return error; 1679 } 1680 1681 prod = tdata->jme_tx_prod; 1682 txd = &tdata->jme_txdesc[prod]; 1683 1684 if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) 1685 symbol_desc = 1; 1686 else 1687 symbol_desc = 0; 1688 1689 maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) - 1690 (JME_TXD_RSVD + symbol_desc); 1691 if (maxsegs > JME_MAXTXSEGS) 1692 maxsegs = JME_MAXTXSEGS; 1693 KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc), 1694 ("not enough segments %d", maxsegs)); 1695 1696 error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag, 1697 txd->tx_dmamap, m_head, 1698 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1699 if (error) 1700 goto fail; 1701 *segs_used += nsegs; 1702 1703 bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap, 1704 BUS_DMASYNC_PREWRITE); 1705 1706 m = *m_head; 1707 cflags = 0; 1708 mss = 0; 1709 1710 /* Configure checksum offload. */ 1711 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1712 mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT; 1713 cflags |= JME_TD_TSO; 1714 } else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) { 1715 if (m->m_pkthdr.csum_flags & CSUM_IP) 1716 cflags |= JME_TD_IPCSUM; 1717 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1718 cflags |= JME_TD_TCPCSUM; 1719 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1720 cflags |= JME_TD_UDPCSUM; 1721 } 1722 1723 /* Configure VLAN. */ 1724 if (m->m_flags & M_VLANTAG) { 1725 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK); 1726 cflags |= JME_TD_VLAN_TAG; 1727 } 1728 1729 desc = &tdata->jme_tx_ring[prod]; 1730 desc->flags = htole32(cflags); 1731 desc->addr_hi = htole32(m->m_pkthdr.len); 1732 if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) { 1733 /* 1734 * Use 64bits TX desc chain format. 1735 * 1736 * The first TX desc of the chain, which is setup here, 1737 * is just a symbol TX desc carrying no payload. 1738 */ 1739 flag64 = JME_TD_64BIT; 1740 desc->buflen = htole32(mss); 1741 desc->addr_lo = 0; 1742 1743 *segs_used += 1; 1744 1745 /* No effective TX desc is consumed */ 1746 i = 0; 1747 } else { 1748 /* 1749 * Use 32bits TX desc chain format. 1750 * 1751 * The first TX desc of the chain, which is setup here, 1752 * is an effective TX desc carrying the first segment of 1753 * the mbuf chain. 1754 */ 1755 flag64 = 0; 1756 desc->buflen = htole32(mss | txsegs[0].ds_len); 1757 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr)); 1758 1759 /* One effective TX desc is consumed */ 1760 i = 1; 1761 } 1762 tdata->jme_tx_cnt++; 1763 KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD); 1764 JME_DESC_INC(prod, tdata->jme_tx_desc_cnt); 1765 1766 txd->tx_ndesc = 1 - i; 1767 for (; i < nsegs; i++) { 1768 desc = &tdata->jme_tx_ring[prod]; 1769 desc->buflen = htole32(txsegs[i].ds_len); 1770 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr)); 1771 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr)); 1772 desc->flags = htole32(JME_TD_OWN | flag64); 1773 1774 tdata->jme_tx_cnt++; 1775 KKASSERT(tdata->jme_tx_cnt <= 1776 tdata->jme_tx_desc_cnt - JME_TXD_RSVD); 1777 JME_DESC_INC(prod, tdata->jme_tx_desc_cnt); 1778 } 1779 1780 /* Update producer index. */ 1781 tdata->jme_tx_prod = prod; 1782 /* 1783 * Finally request interrupt and give the first descriptor 1784 * owenership to hardware. 1785 */ 1786 desc = txd->tx_desc; 1787 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); 1788 1789 txd->tx_m = m; 1790 txd->tx_ndesc += nsegs; 1791 1792 return 0; 1793 fail: 1794 m_freem(*m_head); 1795 *m_head = NULL; 1796 return error; 1797 } 1798 1799 static void 1800 jme_start(struct ifnet *ifp) 1801 { 1802 struct jme_softc *sc = ifp->if_softc; 1803 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1804 struct mbuf *m_head; 1805 int enq = 0; 1806 1807 ASSERT_SERIALIZED(&tdata->jme_tx_serialize); 1808 1809 if (!sc->jme_has_link) { 1810 ifq_purge(&ifp->if_snd); 1811 return; 1812 } 1813 1814 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1815 return; 1816 1817 if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata)) 1818 jme_txeof(tdata); 1819 1820 while (!ifq_is_empty(&ifp->if_snd)) { 1821 /* 1822 * Check number of available TX descs, always 1823 * leave JME_TXD_RSVD free TX descs. 1824 */ 1825 if (tdata->jme_tx_cnt + JME_TXD_SPARE > 1826 tdata->jme_tx_desc_cnt - JME_TXD_RSVD) { 1827 ifq_set_oactive(&ifp->if_snd); 1828 break; 1829 } 1830 1831 m_head = ifq_dequeue(&ifp->if_snd, NULL); 1832 if (m_head == NULL) 1833 break; 1834 1835 /* 1836 * Pack the data into the transmit ring. If we 1837 * don't have room, set the OACTIVE flag and wait 1838 * for the NIC to drain the ring. 1839 */ 1840 if (jme_encap(tdata, &m_head, &enq)) { 1841 KKASSERT(m_head == NULL); 1842 ifp->if_oerrors++; 1843 ifq_set_oactive(&ifp->if_snd); 1844 break; 1845 } 1846 1847 if (enq >= tdata->jme_tx_wreg) { 1848 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | 1849 TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1850 enq = 0; 1851 } 1852 1853 /* 1854 * If there's a BPF listener, bounce a copy of this frame 1855 * to him. 1856 */ 1857 ETHER_BPF_MTAP(ifp, m_head); 1858 1859 /* Set a timeout in case the chip goes out to lunch. */ 1860 ifp->if_timer = JME_TX_TIMEOUT; 1861 } 1862 1863 if (enq > 0) { 1864 /* 1865 * Reading TXCSR takes very long time under heavy load 1866 * so cache TXCSR value and writes the ORed value with 1867 * the kick command to the TXCSR. This saves one register 1868 * access cycle. 1869 */ 1870 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | 1871 TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1872 } 1873 } 1874 1875 static void 1876 jme_watchdog(struct ifnet *ifp) 1877 { 1878 struct jme_softc *sc = ifp->if_softc; 1879 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 1880 1881 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1882 1883 if (!sc->jme_has_link) { 1884 if_printf(ifp, "watchdog timeout (missed link)\n"); 1885 ifp->if_oerrors++; 1886 jme_init(sc); 1887 return; 1888 } 1889 1890 jme_txeof(tdata); 1891 if (tdata->jme_tx_cnt == 0) { 1892 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 1893 "-- recovering\n"); 1894 if (!ifq_is_empty(&ifp->if_snd)) 1895 if_devstart(ifp); 1896 return; 1897 } 1898 1899 if_printf(ifp, "watchdog timeout\n"); 1900 ifp->if_oerrors++; 1901 jme_init(sc); 1902 if (!ifq_is_empty(&ifp->if_snd)) 1903 if_devstart(ifp); 1904 } 1905 1906 static int 1907 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1908 { 1909 struct jme_softc *sc = ifp->if_softc; 1910 struct mii_data *mii = device_get_softc(sc->jme_miibus); 1911 struct ifreq *ifr = (struct ifreq *)data; 1912 int error = 0, mask; 1913 1914 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1915 1916 switch (cmd) { 1917 case SIOCSIFMTU: 1918 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU || 1919 (!(sc->jme_caps & JME_CAP_JUMBO) && 1920 ifr->ifr_mtu > JME_MAX_MTU)) { 1921 error = EINVAL; 1922 break; 1923 } 1924 1925 if (ifp->if_mtu != ifr->ifr_mtu) { 1926 /* 1927 * No special configuration is required when interface 1928 * MTU is changed but availability of Tx checksum 1929 * offload should be chcked against new MTU size as 1930 * FIFO size is just 2K. 1931 */ 1932 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) { 1933 ifp->if_capenable &= 1934 ~(IFCAP_TXCSUM | IFCAP_TSO); 1935 ifp->if_hwassist &= 1936 ~(JME_CSUM_FEATURES | CSUM_TSO); 1937 } 1938 ifp->if_mtu = ifr->ifr_mtu; 1939 if (ifp->if_flags & IFF_RUNNING) 1940 jme_init(sc); 1941 } 1942 break; 1943 1944 case SIOCSIFFLAGS: 1945 if (ifp->if_flags & IFF_UP) { 1946 if (ifp->if_flags & IFF_RUNNING) { 1947 if ((ifp->if_flags ^ sc->jme_if_flags) & 1948 (IFF_PROMISC | IFF_ALLMULTI)) 1949 jme_set_filter(sc); 1950 } else { 1951 jme_init(sc); 1952 } 1953 } else { 1954 if (ifp->if_flags & IFF_RUNNING) 1955 jme_stop(sc); 1956 } 1957 sc->jme_if_flags = ifp->if_flags; 1958 break; 1959 1960 case SIOCADDMULTI: 1961 case SIOCDELMULTI: 1962 if (ifp->if_flags & IFF_RUNNING) 1963 jme_set_filter(sc); 1964 break; 1965 1966 case SIOCSIFMEDIA: 1967 case SIOCGIFMEDIA: 1968 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1969 break; 1970 1971 case SIOCSIFCAP: 1972 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1973 1974 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 1975 ifp->if_capenable ^= IFCAP_TXCSUM; 1976 if (ifp->if_capenable & IFCAP_TXCSUM) 1977 ifp->if_hwassist |= JME_CSUM_FEATURES; 1978 else 1979 ifp->if_hwassist &= ~JME_CSUM_FEATURES; 1980 } 1981 if (mask & IFCAP_RXCSUM) { 1982 uint32_t reg; 1983 1984 ifp->if_capenable ^= IFCAP_RXCSUM; 1985 reg = CSR_READ_4(sc, JME_RXMAC); 1986 reg &= ~RXMAC_CSUM_ENB; 1987 if (ifp->if_capenable & IFCAP_RXCSUM) 1988 reg |= RXMAC_CSUM_ENB; 1989 CSR_WRITE_4(sc, JME_RXMAC, reg); 1990 } 1991 1992 if (mask & IFCAP_VLAN_HWTAGGING) { 1993 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1994 jme_set_vlan(sc); 1995 } 1996 1997 if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 1998 ifp->if_capenable ^= IFCAP_TSO; 1999 if (ifp->if_capenable & IFCAP_TSO) 2000 ifp->if_hwassist |= CSUM_TSO; 2001 else 2002 ifp->if_hwassist &= ~CSUM_TSO; 2003 } 2004 2005 if (mask & IFCAP_RSS) 2006 ifp->if_capenable ^= IFCAP_RSS; 2007 break; 2008 2009 default: 2010 error = ether_ioctl(ifp, cmd, data); 2011 break; 2012 } 2013 return (error); 2014 } 2015 2016 static void 2017 jme_mac_config(struct jme_softc *sc) 2018 { 2019 struct mii_data *mii; 2020 uint32_t ghc, rxmac, txmac, txpause, gp1; 2021 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0; 2022 2023 mii = device_get_softc(sc->jme_miibus); 2024 2025 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2026 DELAY(10); 2027 CSR_WRITE_4(sc, JME_GHC, 0); 2028 ghc = 0; 2029 rxmac = CSR_READ_4(sc, JME_RXMAC); 2030 rxmac &= ~RXMAC_FC_ENB; 2031 txmac = CSR_READ_4(sc, JME_TXMAC); 2032 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); 2033 txpause = CSR_READ_4(sc, JME_TXPFC); 2034 txpause &= ~TXPFC_PAUSE_ENB; 2035 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2036 ghc |= GHC_FULL_DUPLEX; 2037 rxmac &= ~RXMAC_COLL_DET_ENB; 2038 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | 2039 TXMAC_BACKOFF | TXMAC_CARRIER_EXT | 2040 TXMAC_FRAME_BURST); 2041 #ifdef notyet 2042 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2043 txpause |= TXPFC_PAUSE_ENB; 2044 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2045 rxmac |= RXMAC_FC_ENB; 2046 #endif 2047 /* Disable retry transmit timer/retry limit. */ 2048 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) & 2049 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); 2050 } else { 2051 rxmac |= RXMAC_COLL_DET_ENB; 2052 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; 2053 /* Enable retry transmit timer/retry limit. */ 2054 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) | 2055 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); 2056 } 2057 2058 /* 2059 * Reprogram Tx/Rx MACs with resolved speed/duplex. 2060 */ 2061 gp1 = CSR_READ_4(sc, JME_GPREG1); 2062 gp1 &= ~GPREG1_WA_HDX; 2063 2064 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) 2065 hdx = 1; 2066 2067 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2068 case IFM_10_T: 2069 ghc |= GHC_SPEED_10 | sc->jme_clksrc; 2070 if (hdx) 2071 gp1 |= GPREG1_WA_HDX; 2072 break; 2073 2074 case IFM_100_TX: 2075 ghc |= GHC_SPEED_100 | sc->jme_clksrc; 2076 if (hdx) 2077 gp1 |= GPREG1_WA_HDX; 2078 2079 /* 2080 * Use extended FIFO depth to workaround CRC errors 2081 * emitted by chips before JMC250B 2082 */ 2083 phyconf = JMPHY_CONF_EXTFIFO; 2084 break; 2085 2086 case IFM_1000_T: 2087 if (sc->jme_caps & JME_CAP_FASTETH) 2088 break; 2089 2090 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000; 2091 if (hdx) 2092 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; 2093 break; 2094 2095 default: 2096 break; 2097 } 2098 CSR_WRITE_4(sc, JME_GHC, ghc); 2099 CSR_WRITE_4(sc, JME_RXMAC, rxmac); 2100 CSR_WRITE_4(sc, JME_TXMAC, txmac); 2101 CSR_WRITE_4(sc, JME_TXPFC, txpause); 2102 2103 if (sc->jme_workaround & JME_WA_EXTFIFO) { 2104 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 2105 JMPHY_CONF, phyconf); 2106 } 2107 if (sc->jme_workaround & JME_WA_HDX) 2108 CSR_WRITE_4(sc, JME_GPREG1, gp1); 2109 } 2110 2111 static void 2112 jme_intr(void *xsc) 2113 { 2114 struct jme_softc *sc = xsc; 2115 struct ifnet *ifp = &sc->arpcom.ac_if; 2116 uint32_t status; 2117 int r; 2118 2119 ASSERT_SERIALIZED(&sc->jme_serialize); 2120 2121 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS); 2122 if (status == 0 || status == 0xFFFFFFFF) 2123 return; 2124 2125 /* Disable interrupts. */ 2126 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2127 2128 status = CSR_READ_4(sc, JME_INTR_STATUS); 2129 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF) 2130 goto back; 2131 2132 /* Reset PCC counter/timer and Ack interrupts. */ 2133 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); 2134 2135 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) 2136 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; 2137 2138 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2139 if (status & jme_rx_status[r].jme_coal) { 2140 status |= jme_rx_status[r].jme_coal | 2141 jme_rx_status[r].jme_comp; 2142 } 2143 } 2144 2145 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 2146 2147 if (ifp->if_flags & IFF_RUNNING) { 2148 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 2149 2150 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 2151 jme_rx_intr(sc, status); 2152 2153 if (status & INTR_RXQ_DESC_EMPTY) { 2154 /* 2155 * Notify hardware availability of new Rx buffers. 2156 * Reading RXCSR takes very long time under heavy 2157 * load so cache RXCSR value and writes the ORed 2158 * value with the kick command to the RXCSR. This 2159 * saves one register access cycle. 2160 */ 2161 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 2162 RXCSR_RX_ENB | RXCSR_RXQ_START); 2163 } 2164 2165 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) { 2166 lwkt_serialize_enter(&tdata->jme_tx_serialize); 2167 jme_txeof(tdata); 2168 if (!ifq_is_empty(&ifp->if_snd)) 2169 if_devstart(ifp); 2170 lwkt_serialize_exit(&tdata->jme_tx_serialize); 2171 } 2172 } 2173 back: 2174 /* Reenable interrupts. */ 2175 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2176 } 2177 2178 static void 2179 jme_txeof(struct jme_txdata *tdata) 2180 { 2181 struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if; 2182 int cons; 2183 2184 cons = tdata->jme_tx_cons; 2185 if (cons == tdata->jme_tx_prod) 2186 return; 2187 2188 /* 2189 * Go through our Tx list and free mbufs for those 2190 * frames which have been transmitted. 2191 */ 2192 while (cons != tdata->jme_tx_prod) { 2193 struct jme_txdesc *txd, *next_txd; 2194 uint32_t status, next_status; 2195 int next_cons, nsegs; 2196 2197 txd = &tdata->jme_txdesc[cons]; 2198 KASSERT(txd->tx_m != NULL, 2199 ("%s: freeing NULL mbuf!", __func__)); 2200 2201 status = le32toh(txd->tx_desc->flags); 2202 if ((status & JME_TD_OWN) == JME_TD_OWN) 2203 break; 2204 2205 /* 2206 * NOTE: 2207 * This chip will always update the TX descriptor's 2208 * buflen field and this updating always happens 2209 * after clearing the OWN bit, so even if the OWN 2210 * bit is cleared by the chip, we still don't sure 2211 * about whether the buflen field has been updated 2212 * by the chip or not. To avoid this race, we wait 2213 * for the next TX descriptor's OWN bit to be cleared 2214 * by the chip before reusing this TX descriptor. 2215 */ 2216 next_cons = cons; 2217 JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt); 2218 next_txd = &tdata->jme_txdesc[next_cons]; 2219 if (next_txd->tx_m == NULL) 2220 break; 2221 next_status = le32toh(next_txd->tx_desc->flags); 2222 if ((next_status & JME_TD_OWN) == JME_TD_OWN) 2223 break; 2224 2225 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) { 2226 ifp->if_oerrors++; 2227 } else { 2228 ifp->if_opackets++; 2229 if (status & JME_TD_COLLISION) { 2230 ifp->if_collisions += 2231 le32toh(txd->tx_desc->buflen) & 2232 JME_TD_BUF_LEN_MASK; 2233 } 2234 } 2235 2236 /* 2237 * Only the first descriptor of multi-descriptor 2238 * transmission is updated so driver have to skip entire 2239 * chained buffers for the transmiited frame. In other 2240 * words, JME_TD_OWN bit is valid only at the first 2241 * descriptor of a multi-descriptor transmission. 2242 */ 2243 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 2244 tdata->jme_tx_ring[cons].flags = 0; 2245 JME_DESC_INC(cons, tdata->jme_tx_desc_cnt); 2246 } 2247 2248 /* Reclaim transferred mbufs. */ 2249 bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap); 2250 m_freem(txd->tx_m); 2251 txd->tx_m = NULL; 2252 tdata->jme_tx_cnt -= txd->tx_ndesc; 2253 KASSERT(tdata->jme_tx_cnt >= 0, 2254 ("%s: Active Tx desc counter was garbled", __func__)); 2255 txd->tx_ndesc = 0; 2256 } 2257 tdata->jme_tx_cons = cons; 2258 2259 /* 1 for symbol TX descriptor */ 2260 if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1) 2261 ifp->if_timer = 0; 2262 2263 if (tdata->jme_tx_cnt + JME_TXD_SPARE <= 2264 tdata->jme_tx_desc_cnt - JME_TXD_RSVD) 2265 ifq_clr_oactive(&ifp->if_snd); 2266 } 2267 2268 static __inline void 2269 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count) 2270 { 2271 int i; 2272 2273 for (i = 0; i < count; ++i) { 2274 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]); 2275 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt); 2276 } 2277 } 2278 2279 static __inline struct pktinfo * 2280 jme_pktinfo(struct pktinfo *pi, uint32_t flags) 2281 { 2282 if (flags & JME_RD_IPV4) 2283 pi->pi_netisr = NETISR_IP; 2284 else if (flags & JME_RD_IPV6) 2285 pi->pi_netisr = NETISR_IPV6; 2286 else 2287 return NULL; 2288 2289 pi->pi_flags = 0; 2290 pi->pi_l3proto = IPPROTO_UNKNOWN; 2291 2292 if (flags & JME_RD_MORE_FRAG) 2293 pi->pi_flags |= PKTINFO_FLAG_FRAG; 2294 else if (flags & JME_RD_TCP) 2295 pi->pi_l3proto = IPPROTO_TCP; 2296 else if (flags & JME_RD_UDP) 2297 pi->pi_l3proto = IPPROTO_UDP; 2298 else 2299 pi = NULL; 2300 return pi; 2301 } 2302 2303 /* Receive a frame. */ 2304 static void 2305 jme_rxpkt(struct jme_rxdata *rdata) 2306 { 2307 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if; 2308 struct jme_desc *desc; 2309 struct jme_rxdesc *rxd; 2310 struct mbuf *mp, *m; 2311 uint32_t flags, status, hash, hashinfo; 2312 int cons, count, nsegs; 2313 2314 cons = rdata->jme_rx_cons; 2315 desc = &rdata->jme_rx_ring[cons]; 2316 2317 flags = le32toh(desc->flags); 2318 status = le32toh(desc->buflen); 2319 hash = le32toh(desc->addr_hi); 2320 hashinfo = le32toh(desc->addr_lo); 2321 nsegs = JME_RX_NSEGS(status); 2322 2323 if (nsegs > 1) { 2324 /* Skip the first descriptor. */ 2325 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt); 2326 2327 /* 2328 * Clear the OWN bit of the following RX descriptors; 2329 * hardware will not clear the OWN bit except the first 2330 * RX descriptor. 2331 * 2332 * Since the first RX descriptor is setup, i.e. OWN bit 2333 * on, before its followins RX descriptors, leaving the 2334 * OWN bit on the following RX descriptors will trick 2335 * the hardware into thinking that the following RX 2336 * descriptors are ready to be used too. 2337 */ 2338 for (count = 1; count < nsegs; count++, 2339 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) 2340 rdata->jme_rx_ring[cons].flags = 0; 2341 2342 cons = rdata->jme_rx_cons; 2343 } 2344 2345 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, " 2346 "hash 0x%08x, hash info 0x%08x\n", 2347 rdata->jme_rx_idx, flags, hash, hashinfo); 2348 2349 if (status & JME_RX_ERR_STAT) { 2350 ifp->if_ierrors++; 2351 jme_discard_rxbufs(rdata, cons, nsegs); 2352 #ifdef JME_SHOW_ERRORS 2353 if_printf(ifp, "%s : receive error = 0x%b\n", 2354 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); 2355 #endif 2356 rdata->jme_rx_cons += nsegs; 2357 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt; 2358 return; 2359 } 2360 2361 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 2362 for (count = 0; count < nsegs; count++, 2363 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) { 2364 rxd = &rdata->jme_rxdesc[cons]; 2365 mp = rxd->rx_m; 2366 2367 /* Add a new receive buffer to the ring. */ 2368 if (jme_newbuf(rdata, rxd, 0) != 0) { 2369 ifp->if_iqdrops++; 2370 /* Reuse buffer. */ 2371 jme_discard_rxbufs(rdata, cons, nsegs - count); 2372 if (rdata->jme_rxhead != NULL) { 2373 m_freem(rdata->jme_rxhead); 2374 JME_RXCHAIN_RESET(rdata); 2375 } 2376 break; 2377 } 2378 2379 /* 2380 * Assume we've received a full sized frame. 2381 * Actual size is fixed when we encounter the end of 2382 * multi-segmented frame. 2383 */ 2384 mp->m_len = MCLBYTES; 2385 2386 /* Chain received mbufs. */ 2387 if (rdata->jme_rxhead == NULL) { 2388 rdata->jme_rxhead = mp; 2389 rdata->jme_rxtail = mp; 2390 } else { 2391 /* 2392 * Receive processor can receive a maximum frame 2393 * size of 65535 bytes. 2394 */ 2395 rdata->jme_rxtail->m_next = mp; 2396 rdata->jme_rxtail = mp; 2397 } 2398 2399 if (count == nsegs - 1) { 2400 struct pktinfo pi0, *pi; 2401 2402 /* Last desc. for this frame. */ 2403 m = rdata->jme_rxhead; 2404 m->m_pkthdr.len = rdata->jme_rxlen; 2405 if (nsegs > 1) { 2406 /* Set first mbuf size. */ 2407 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 2408 /* Set last mbuf size. */ 2409 mp->m_len = rdata->jme_rxlen - 2410 ((MCLBYTES - JME_RX_PAD_BYTES) + 2411 (MCLBYTES * (nsegs - 2))); 2412 } else { 2413 m->m_len = rdata->jme_rxlen; 2414 } 2415 m->m_pkthdr.rcvif = ifp; 2416 2417 /* 2418 * Account for 10bytes auto padding which is used 2419 * to align IP header on 32bit boundary. Also note, 2420 * CRC bytes is automatically removed by the 2421 * hardware. 2422 */ 2423 m->m_data += JME_RX_PAD_BYTES; 2424 2425 /* Set checksum information. */ 2426 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2427 (flags & JME_RD_IPV4)) { 2428 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2429 if (flags & JME_RD_IPCSUM) 2430 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2431 if ((flags & JME_RD_MORE_FRAG) == 0 && 2432 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 2433 (JME_RD_TCP | JME_RD_TCPCSUM) || 2434 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 2435 (JME_RD_UDP | JME_RD_UDPCSUM))) { 2436 m->m_pkthdr.csum_flags |= 2437 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2438 m->m_pkthdr.csum_data = 0xffff; 2439 } 2440 } 2441 2442 /* Check for VLAN tagged packets. */ 2443 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) && 2444 (flags & JME_RD_VLAN_TAG)) { 2445 m->m_pkthdr.ether_vlantag = 2446 flags & JME_RD_VLAN_MASK; 2447 m->m_flags |= M_VLANTAG; 2448 } 2449 2450 ifp->if_ipackets++; 2451 2452 if (ifp->if_capenable & IFCAP_RSS) 2453 pi = jme_pktinfo(&pi0, flags); 2454 else 2455 pi = NULL; 2456 2457 if (pi != NULL && 2458 (hashinfo & JME_RD_HASH_FN_MASK) == 2459 JME_RD_HASH_FN_TOEPLITZ) { 2460 m->m_flags |= (M_HASH | M_CKHASH); 2461 m->m_pkthdr.hash = toeplitz_hash(hash); 2462 } 2463 2464 #ifdef JME_RSS_DEBUG 2465 if (pi != NULL) { 2466 JME_RSS_DPRINTF(rdata->jme_sc, 10, 2467 "isr %d flags %08x, l3 %d %s\n", 2468 pi->pi_netisr, pi->pi_flags, 2469 pi->pi_l3proto, 2470 (m->m_flags & M_HASH) ? "hash" : ""); 2471 } 2472 #endif 2473 2474 /* Pass it on. */ 2475 ether_input_pkt(ifp, m, pi); 2476 2477 /* Reset mbuf chains. */ 2478 JME_RXCHAIN_RESET(rdata); 2479 #ifdef JME_RSS_DEBUG 2480 rdata->jme_rx_pkt++; 2481 #endif 2482 } 2483 } 2484 2485 rdata->jme_rx_cons += nsegs; 2486 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt; 2487 } 2488 2489 static void 2490 jme_rxeof(struct jme_rxdata *rdata, int count) 2491 { 2492 struct jme_desc *desc; 2493 int nsegs, pktlen; 2494 2495 for (;;) { 2496 #ifdef IFPOLL_ENABLE 2497 if (count >= 0 && count-- == 0) 2498 break; 2499 #endif 2500 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons]; 2501 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2502 break; 2503 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2504 break; 2505 2506 /* 2507 * Check number of segments against received bytes. 2508 * Non-matching value would indicate that hardware 2509 * is still trying to update Rx descriptors. I'm not 2510 * sure whether this check is needed. 2511 */ 2512 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2513 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2514 if (nsegs != howmany(pktlen, MCLBYTES)) { 2515 if_printf(&rdata->jme_sc->arpcom.ac_if, 2516 "RX fragment count(%d) and " 2517 "packet size(%d) mismach\n", nsegs, pktlen); 2518 break; 2519 } 2520 2521 /* 2522 * NOTE: 2523 * RSS hash and hash information may _not_ be set by the 2524 * hardware even if the OWN bit is cleared and VALID bit 2525 * is set. 2526 * 2527 * If the RSS information is not delivered by the hardware 2528 * yet, we MUST NOT accept this packet, let alone reusing 2529 * its RX descriptor. If this packet was accepted and its 2530 * RX descriptor was reused before hardware delivering the 2531 * RSS information, the RX buffer's address would be trashed 2532 * by the RSS information delivered by the hardware. 2533 */ 2534 if (JME_ENABLE_HWRSS(rdata->jme_sc)) { 2535 struct jme_rxdesc *rxd; 2536 uint32_t hashinfo; 2537 2538 hashinfo = le32toh(desc->addr_lo); 2539 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons]; 2540 2541 /* 2542 * This test should be enough to detect the pending 2543 * RSS information delivery, given: 2544 * - If RSS hash is not calculated, the hashinfo 2545 * will be 0. Howvever, the lower 32bits of RX 2546 * buffers' physical address will never be 0. 2547 * (see jme_rxbuf_dma_filter) 2548 * - If RSS hash is calculated, the lowest 4 bits 2549 * of hashinfo will be set, while the RX buffers 2550 * are at least 2K aligned. 2551 */ 2552 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) { 2553 #ifdef JME_SHOW_RSSWB 2554 if_printf(&rdata->jme_sc->arpcom.ac_if, 2555 "RSS is not written back yet\n"); 2556 #endif 2557 break; 2558 } 2559 } 2560 2561 /* Received a frame. */ 2562 jme_rxpkt(rdata); 2563 } 2564 } 2565 2566 static void 2567 jme_tick(void *xsc) 2568 { 2569 struct jme_softc *sc = xsc; 2570 struct mii_data *mii = device_get_softc(sc->jme_miibus); 2571 2572 lwkt_serialize_enter(&sc->jme_serialize); 2573 2574 KKASSERT(mycpuid == JME_TICK_CPUID); 2575 2576 sc->jme_in_tick = TRUE; 2577 mii_tick(mii); 2578 sc->jme_in_tick = FALSE; 2579 2580 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2581 2582 lwkt_serialize_exit(&sc->jme_serialize); 2583 } 2584 2585 static void 2586 jme_reset(struct jme_softc *sc) 2587 { 2588 uint32_t val; 2589 2590 /* Make sure that TX and RX are stopped */ 2591 jme_stop_tx(sc); 2592 jme_stop_rx(sc); 2593 2594 /* Start reset */ 2595 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2596 DELAY(20); 2597 2598 /* 2599 * Hold reset bit before stop reset 2600 */ 2601 2602 /* Disable TXMAC and TXOFL clock sources */ 2603 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2604 /* Disable RXMAC clock source */ 2605 val = CSR_READ_4(sc, JME_GPREG1); 2606 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2607 /* Flush */ 2608 CSR_READ_4(sc, JME_GHC); 2609 2610 /* Stop reset */ 2611 CSR_WRITE_4(sc, JME_GHC, 0); 2612 /* Flush */ 2613 CSR_READ_4(sc, JME_GHC); 2614 2615 /* 2616 * Clear reset bit after stop reset 2617 */ 2618 2619 /* Enable TXMAC and TXOFL clock sources */ 2620 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2621 /* Enable RXMAC clock source */ 2622 val = CSR_READ_4(sc, JME_GPREG1); 2623 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2624 /* Flush */ 2625 CSR_READ_4(sc, JME_GHC); 2626 2627 /* Disable TXMAC and TXOFL clock sources */ 2628 CSR_WRITE_4(sc, JME_GHC, 0); 2629 /* Disable RXMAC clock source */ 2630 val = CSR_READ_4(sc, JME_GPREG1); 2631 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2632 /* Flush */ 2633 CSR_READ_4(sc, JME_GHC); 2634 2635 /* Enable TX and RX */ 2636 val = CSR_READ_4(sc, JME_TXCSR); 2637 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB); 2638 val = CSR_READ_4(sc, JME_RXCSR); 2639 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB); 2640 /* Flush */ 2641 CSR_READ_4(sc, JME_TXCSR); 2642 CSR_READ_4(sc, JME_RXCSR); 2643 2644 /* Enable TXMAC and TXOFL clock sources */ 2645 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2646 /* Eisable RXMAC clock source */ 2647 val = CSR_READ_4(sc, JME_GPREG1); 2648 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2649 /* Flush */ 2650 CSR_READ_4(sc, JME_GHC); 2651 2652 /* Stop TX and RX */ 2653 jme_stop_tx(sc); 2654 jme_stop_rx(sc); 2655 } 2656 2657 static void 2658 jme_init(void *xsc) 2659 { 2660 struct jme_softc *sc = xsc; 2661 struct ifnet *ifp = &sc->arpcom.ac_if; 2662 struct mii_data *mii; 2663 uint8_t eaddr[ETHER_ADDR_LEN]; 2664 bus_addr_t paddr; 2665 uint32_t reg; 2666 int error, r; 2667 2668 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2669 2670 /* 2671 * Cancel any pending I/O. 2672 */ 2673 jme_stop(sc); 2674 2675 /* 2676 * Reset the chip to a known state. 2677 */ 2678 jme_reset(sc); 2679 2680 /* 2681 * Setup MSI/MSI-X vectors to interrupts mapping 2682 */ 2683 jme_set_msinum(sc); 2684 2685 if (JME_ENABLE_HWRSS(sc)) 2686 jme_enable_rss(sc); 2687 else 2688 jme_disable_rss(sc); 2689 2690 /* Init RX descriptors */ 2691 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2692 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]); 2693 if (error) { 2694 if_printf(ifp, "initialization failed: " 2695 "no memory for %dth RX ring.\n", r); 2696 jme_stop(sc); 2697 return; 2698 } 2699 } 2700 2701 /* Init TX descriptors */ 2702 jme_init_tx_ring(&sc->jme_cdata.jme_tx_data); 2703 2704 /* Initialize shadow status block. */ 2705 jme_init_ssb(sc); 2706 2707 /* Reprogram the station address. */ 2708 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2709 CSR_WRITE_4(sc, JME_PAR0, 2710 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 2711 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 2712 2713 /* 2714 * Configure Tx queue. 2715 * Tx priority queue weight value : 0 2716 * Tx FIFO threshold for processing next packet : 16QW 2717 * Maximum Tx DMA length : 512 2718 * Allow Tx DMA burst. 2719 */ 2720 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2721 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2722 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2723 sc->jme_txcsr |= sc->jme_tx_dma_size; 2724 sc->jme_txcsr |= TXCSR_DMA_BURST; 2725 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2726 2727 /* Set Tx descriptor counter. */ 2728 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt); 2729 2730 /* Set Tx ring address to the hardware. */ 2731 paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr; 2732 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2733 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2734 2735 /* Configure TxMAC parameters. */ 2736 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 2737 reg |= TXMAC_THRESH_1_PKT; 2738 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 2739 CSR_WRITE_4(sc, JME_TXMAC, reg); 2740 2741 /* 2742 * Configure Rx queue. 2743 * FIFO full threshold for transmitting Tx pause packet : 128T 2744 * FIFO threshold for processing next packet : 128QW 2745 * Rx queue 0 select 2746 * Max Rx DMA length : 128 2747 * Rx descriptor retry : 32 2748 * Rx descriptor retry time gap : 256ns 2749 * Don't receive runt/bad frame. 2750 */ 2751 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 2752 #if 0 2753 /* 2754 * Since Rx FIFO size is 4K bytes, receiving frames larger 2755 * than 4K bytes will suffer from Rx FIFO overruns. So 2756 * decrease FIFO threshold to reduce the FIFO overruns for 2757 * frames larger than 4000 bytes. 2758 * For best performance of standard MTU sized frames use 2759 * maximum allowable FIFO threshold, 128QW. 2760 */ 2761 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) > 2762 JME_RX_FIFO_SIZE) 2763 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2764 else 2765 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 2766 #else 2767 /* Improve PCI Express compatibility */ 2768 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2769 #endif 2770 sc->jme_rxcsr |= sc->jme_rx_dma_size; 2771 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 2772 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 2773 /* XXX TODO DROP_BAD */ 2774 2775 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2776 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 2777 2778 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 2779 2780 /* Set Rx descriptor counter. */ 2781 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt); 2782 2783 /* Set Rx ring address to the hardware. */ 2784 paddr = rdata->jme_rx_ring_paddr; 2785 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2786 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2787 } 2788 2789 /* Clear receive filter. */ 2790 CSR_WRITE_4(sc, JME_RXMAC, 0); 2791 2792 /* Set up the receive filter. */ 2793 jme_set_filter(sc); 2794 jme_set_vlan(sc); 2795 2796 /* 2797 * Disable all WOL bits as WOL can interfere normal Rx 2798 * operation. Also clear WOL detection status bits. 2799 */ 2800 reg = CSR_READ_4(sc, JME_PMCS); 2801 reg &= ~PMCS_WOL_ENB_MASK; 2802 CSR_WRITE_4(sc, JME_PMCS, reg); 2803 2804 /* 2805 * Pad 10bytes right before received frame. This will greatly 2806 * help Rx performance on strict-alignment architectures as 2807 * it does not need to copy the frame to align the payload. 2808 */ 2809 reg = CSR_READ_4(sc, JME_RXMAC); 2810 reg |= RXMAC_PAD_10BYTES; 2811 2812 if (ifp->if_capenable & IFCAP_RXCSUM) 2813 reg |= RXMAC_CSUM_ENB; 2814 CSR_WRITE_4(sc, JME_RXMAC, reg); 2815 2816 /* Configure general purpose reg0 */ 2817 reg = CSR_READ_4(sc, JME_GPREG0); 2818 reg &= ~GPREG0_PCC_UNIT_MASK; 2819 /* Set PCC timer resolution to micro-seconds unit. */ 2820 reg |= GPREG0_PCC_UNIT_US; 2821 /* 2822 * Disable all shadow register posting as we have to read 2823 * JME_INTR_STATUS register in jme_intr. Also it seems 2824 * that it's hard to synchronize interrupt status between 2825 * hardware and software with shadow posting due to 2826 * requirements of bus_dmamap_sync(9). 2827 */ 2828 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2829 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2830 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2831 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2832 /* Disable posting of DW0. */ 2833 reg &= ~GPREG0_POST_DW0_ENB; 2834 /* Clear PME message. */ 2835 reg &= ~GPREG0_PME_ENB; 2836 /* Set PHY address. */ 2837 reg &= ~GPREG0_PHY_ADDR_MASK; 2838 reg |= sc->jme_phyaddr; 2839 CSR_WRITE_4(sc, JME_GPREG0, reg); 2840 2841 /* Configure Tx queue 0 packet completion coalescing. */ 2842 jme_set_tx_coal(sc); 2843 2844 /* Configure Rx queues packet completion coalescing. */ 2845 jme_set_rx_coal(sc); 2846 2847 /* Configure shadow status block but don't enable posting. */ 2848 paddr = sc->jme_cdata.jme_ssb_block_paddr; 2849 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2850 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2851 2852 /* Disable Timer 1 and Timer 2. */ 2853 CSR_WRITE_4(sc, JME_TIMER1, 0); 2854 CSR_WRITE_4(sc, JME_TIMER2, 0); 2855 2856 /* Configure retry transmit period, retry limit value. */ 2857 CSR_WRITE_4(sc, JME_TXTRHD, 2858 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2859 TXTRHD_RT_PERIOD_MASK) | 2860 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2861 TXTRHD_RT_LIMIT_SHIFT)); 2862 2863 #ifdef IFPOLL_ENABLE 2864 if (!(ifp->if_flags & IFF_NPOLLING)) 2865 #endif 2866 /* Initialize the interrupt mask. */ 2867 jme_enable_intr(sc); 2868 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2869 2870 /* 2871 * Enabling Tx/Rx DMA engines and Rx queue processing is 2872 * done after detection of valid link in jme_miibus_statchg. 2873 */ 2874 sc->jme_has_link = FALSE; 2875 2876 /* Set the current media. */ 2877 mii = device_get_softc(sc->jme_miibus); 2878 mii_mediachg(mii); 2879 2880 callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc, 2881 JME_TICK_CPUID); 2882 2883 ifp->if_flags |= IFF_RUNNING; 2884 ifq_clr_oactive(&ifp->if_snd); 2885 } 2886 2887 static void 2888 jme_stop(struct jme_softc *sc) 2889 { 2890 struct ifnet *ifp = &sc->arpcom.ac_if; 2891 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data; 2892 struct jme_txdesc *txd; 2893 struct jme_rxdesc *rxd; 2894 struct jme_rxdata *rdata; 2895 int i, r; 2896 2897 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2898 2899 /* 2900 * Mark the interface down and cancel the watchdog timer. 2901 */ 2902 ifp->if_flags &= ~IFF_RUNNING; 2903 ifq_clr_oactive(&ifp->if_snd); 2904 ifp->if_timer = 0; 2905 2906 callout_stop(&sc->jme_tick_ch); 2907 sc->jme_has_link = FALSE; 2908 2909 /* 2910 * Disable interrupts. 2911 */ 2912 jme_disable_intr(sc); 2913 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2914 2915 /* Disable updating shadow status block. */ 2916 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 2917 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 2918 2919 /* Stop receiver, transmitter. */ 2920 jme_stop_rx(sc); 2921 jme_stop_tx(sc); 2922 2923 /* 2924 * Free partial finished RX segments 2925 */ 2926 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2927 rdata = &sc->jme_cdata.jme_rx_data[r]; 2928 if (rdata->jme_rxhead != NULL) 2929 m_freem(rdata->jme_rxhead); 2930 JME_RXCHAIN_RESET(rdata); 2931 } 2932 2933 /* 2934 * Free RX and TX mbufs still in the queues. 2935 */ 2936 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2937 rdata = &sc->jme_cdata.jme_rx_data[r]; 2938 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 2939 rxd = &rdata->jme_rxdesc[i]; 2940 if (rxd->rx_m != NULL) { 2941 bus_dmamap_unload(rdata->jme_rx_tag, 2942 rxd->rx_dmamap); 2943 m_freem(rxd->rx_m); 2944 rxd->rx_m = NULL; 2945 } 2946 } 2947 } 2948 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 2949 txd = &tdata->jme_txdesc[i]; 2950 if (txd->tx_m != NULL) { 2951 bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap); 2952 m_freem(txd->tx_m); 2953 txd->tx_m = NULL; 2954 txd->tx_ndesc = 0; 2955 } 2956 } 2957 } 2958 2959 static void 2960 jme_stop_tx(struct jme_softc *sc) 2961 { 2962 uint32_t reg; 2963 int i; 2964 2965 reg = CSR_READ_4(sc, JME_TXCSR); 2966 if ((reg & TXCSR_TX_ENB) == 0) 2967 return; 2968 reg &= ~TXCSR_TX_ENB; 2969 CSR_WRITE_4(sc, JME_TXCSR, reg); 2970 for (i = JME_TIMEOUT; i > 0; i--) { 2971 DELAY(1); 2972 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2973 break; 2974 } 2975 if (i == 0) 2976 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2977 } 2978 2979 static void 2980 jme_stop_rx(struct jme_softc *sc) 2981 { 2982 uint32_t reg; 2983 int i; 2984 2985 reg = CSR_READ_4(sc, JME_RXCSR); 2986 if ((reg & RXCSR_RX_ENB) == 0) 2987 return; 2988 reg &= ~RXCSR_RX_ENB; 2989 CSR_WRITE_4(sc, JME_RXCSR, reg); 2990 for (i = JME_TIMEOUT; i > 0; i--) { 2991 DELAY(1); 2992 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 2993 break; 2994 } 2995 if (i == 0) 2996 device_printf(sc->jme_dev, "stopping recevier timeout!\n"); 2997 } 2998 2999 static void 3000 jme_init_tx_ring(struct jme_txdata *tdata) 3001 { 3002 struct jme_txdesc *txd; 3003 int i; 3004 3005 tdata->jme_tx_prod = 0; 3006 tdata->jme_tx_cons = 0; 3007 tdata->jme_tx_cnt = 0; 3008 3009 bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata)); 3010 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) { 3011 txd = &tdata->jme_txdesc[i]; 3012 txd->tx_m = NULL; 3013 txd->tx_desc = &tdata->jme_tx_ring[i]; 3014 txd->tx_ndesc = 0; 3015 } 3016 } 3017 3018 static void 3019 jme_init_ssb(struct jme_softc *sc) 3020 { 3021 struct jme_chain_data *cd; 3022 3023 cd = &sc->jme_cdata; 3024 bzero(cd->jme_ssb_block, JME_SSB_SIZE); 3025 } 3026 3027 static int 3028 jme_init_rx_ring(struct jme_rxdata *rdata) 3029 { 3030 struct jme_rxdesc *rxd; 3031 int i; 3032 3033 KKASSERT(rdata->jme_rxhead == NULL && 3034 rdata->jme_rxtail == NULL && 3035 rdata->jme_rxlen == 0); 3036 rdata->jme_rx_cons = 0; 3037 3038 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata)); 3039 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 3040 int error; 3041 3042 rxd = &rdata->jme_rxdesc[i]; 3043 rxd->rx_m = NULL; 3044 rxd->rx_desc = &rdata->jme_rx_ring[i]; 3045 error = jme_newbuf(rdata, rxd, 1); 3046 if (error) 3047 return error; 3048 } 3049 return 0; 3050 } 3051 3052 static int 3053 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init) 3054 { 3055 struct mbuf *m; 3056 bus_dma_segment_t segs; 3057 bus_dmamap_t map; 3058 int error, nsegs; 3059 3060 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 3061 if (m == NULL) 3062 return ENOBUFS; 3063 /* 3064 * JMC250 has 64bit boundary alignment limitation so jme(4) 3065 * takes advantage of 10 bytes padding feature of hardware 3066 * in order not to copy entire frame to align IP header on 3067 * 32bit boundary. 3068 */ 3069 m->m_len = m->m_pkthdr.len = MCLBYTES; 3070 3071 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag, 3072 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs, 3073 BUS_DMA_NOWAIT); 3074 if (error) { 3075 m_freem(m); 3076 if (init) { 3077 if_printf(&rdata->jme_sc->arpcom.ac_if, 3078 "can't load RX mbuf\n"); 3079 } 3080 return error; 3081 } 3082 3083 if (rxd->rx_m != NULL) { 3084 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap, 3085 BUS_DMASYNC_POSTREAD); 3086 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap); 3087 } 3088 map = rxd->rx_dmamap; 3089 rxd->rx_dmamap = rdata->jme_rx_sparemap; 3090 rdata->jme_rx_sparemap = map; 3091 rxd->rx_m = m; 3092 rxd->rx_paddr = segs.ds_addr; 3093 3094 jme_setup_rxdesc(rxd); 3095 return 0; 3096 } 3097 3098 static void 3099 jme_set_vlan(struct jme_softc *sc) 3100 { 3101 struct ifnet *ifp = &sc->arpcom.ac_if; 3102 uint32_t reg; 3103 3104 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3105 3106 reg = CSR_READ_4(sc, JME_RXMAC); 3107 reg &= ~RXMAC_VLAN_ENB; 3108 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 3109 reg |= RXMAC_VLAN_ENB; 3110 CSR_WRITE_4(sc, JME_RXMAC, reg); 3111 } 3112 3113 static void 3114 jme_set_filter(struct jme_softc *sc) 3115 { 3116 struct ifnet *ifp = &sc->arpcom.ac_if; 3117 struct ifmultiaddr *ifma; 3118 uint32_t crc; 3119 uint32_t mchash[2]; 3120 uint32_t rxcfg; 3121 3122 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3123 3124 rxcfg = CSR_READ_4(sc, JME_RXMAC); 3125 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 3126 RXMAC_ALLMULTI); 3127 3128 /* 3129 * Always accept frames destined to our station address. 3130 * Always accept broadcast frames. 3131 */ 3132 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 3133 3134 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 3135 if (ifp->if_flags & IFF_PROMISC) 3136 rxcfg |= RXMAC_PROMISC; 3137 if (ifp->if_flags & IFF_ALLMULTI) 3138 rxcfg |= RXMAC_ALLMULTI; 3139 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3140 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3141 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3142 return; 3143 } 3144 3145 /* 3146 * Set up the multicast address filter by passing all multicast 3147 * addresses through a CRC generator, and then using the low-order 3148 * 6 bits as an index into the 64 bit multicast hash table. The 3149 * high order bits select the register, while the rest of the bits 3150 * select the bit within the register. 3151 */ 3152 rxcfg |= RXMAC_MULTICAST; 3153 bzero(mchash, sizeof(mchash)); 3154 3155 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3156 if (ifma->ifma_addr->sa_family != AF_LINK) 3157 continue; 3158 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3159 ifma->ifma_addr), ETHER_ADDR_LEN); 3160 3161 /* Just want the 6 least significant bits. */ 3162 crc &= 0x3f; 3163 3164 /* Set the corresponding bit in the hash table. */ 3165 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3166 } 3167 3168 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3169 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3170 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3171 } 3172 3173 static int 3174 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 3175 { 3176 struct jme_softc *sc = arg1; 3177 struct ifnet *ifp = &sc->arpcom.ac_if; 3178 int error, v; 3179 3180 ifnet_serialize_all(ifp); 3181 3182 v = sc->jme_tx_coal_to; 3183 error = sysctl_handle_int(oidp, &v, 0, req); 3184 if (error || req->newptr == NULL) 3185 goto back; 3186 3187 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 3188 error = EINVAL; 3189 goto back; 3190 } 3191 3192 if (v != sc->jme_tx_coal_to) { 3193 sc->jme_tx_coal_to = v; 3194 if (ifp->if_flags & IFF_RUNNING) 3195 jme_set_tx_coal(sc); 3196 } 3197 back: 3198 ifnet_deserialize_all(ifp); 3199 return error; 3200 } 3201 3202 static int 3203 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 3204 { 3205 struct jme_softc *sc = arg1; 3206 struct ifnet *ifp = &sc->arpcom.ac_if; 3207 int error, v; 3208 3209 ifnet_serialize_all(ifp); 3210 3211 v = sc->jme_tx_coal_pkt; 3212 error = sysctl_handle_int(oidp, &v, 0, req); 3213 if (error || req->newptr == NULL) 3214 goto back; 3215 3216 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 3217 error = EINVAL; 3218 goto back; 3219 } 3220 3221 if (v != sc->jme_tx_coal_pkt) { 3222 sc->jme_tx_coal_pkt = v; 3223 if (ifp->if_flags & IFF_RUNNING) 3224 jme_set_tx_coal(sc); 3225 } 3226 back: 3227 ifnet_deserialize_all(ifp); 3228 return error; 3229 } 3230 3231 static int 3232 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 3233 { 3234 struct jme_softc *sc = arg1; 3235 struct ifnet *ifp = &sc->arpcom.ac_if; 3236 int error, v; 3237 3238 ifnet_serialize_all(ifp); 3239 3240 v = sc->jme_rx_coal_to; 3241 error = sysctl_handle_int(oidp, &v, 0, req); 3242 if (error || req->newptr == NULL) 3243 goto back; 3244 3245 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 3246 error = EINVAL; 3247 goto back; 3248 } 3249 3250 if (v != sc->jme_rx_coal_to) { 3251 sc->jme_rx_coal_to = v; 3252 if (ifp->if_flags & IFF_RUNNING) 3253 jme_set_rx_coal(sc); 3254 } 3255 back: 3256 ifnet_deserialize_all(ifp); 3257 return error; 3258 } 3259 3260 static int 3261 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 3262 { 3263 struct jme_softc *sc = arg1; 3264 struct ifnet *ifp = &sc->arpcom.ac_if; 3265 int error, v; 3266 3267 ifnet_serialize_all(ifp); 3268 3269 v = sc->jme_rx_coal_pkt; 3270 error = sysctl_handle_int(oidp, &v, 0, req); 3271 if (error || req->newptr == NULL) 3272 goto back; 3273 3274 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) { 3275 error = EINVAL; 3276 goto back; 3277 } 3278 3279 if (v != sc->jme_rx_coal_pkt) { 3280 sc->jme_rx_coal_pkt = v; 3281 if (ifp->if_flags & IFF_RUNNING) 3282 jme_set_rx_coal(sc); 3283 } 3284 back: 3285 ifnet_deserialize_all(ifp); 3286 return error; 3287 } 3288 3289 static void 3290 jme_set_tx_coal(struct jme_softc *sc) 3291 { 3292 uint32_t reg; 3293 3294 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 3295 PCCTX_COAL_TO_MASK; 3296 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 3297 PCCTX_COAL_PKT_MASK; 3298 reg |= PCCTX_COAL_TXQ0; 3299 CSR_WRITE_4(sc, JME_PCCTX, reg); 3300 } 3301 3302 static void 3303 jme_set_rx_coal(struct jme_softc *sc) 3304 { 3305 uint32_t reg; 3306 int r; 3307 3308 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 3309 PCCRX_COAL_TO_MASK; 3310 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 3311 PCCRX_COAL_PKT_MASK; 3312 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) 3313 CSR_WRITE_4(sc, JME_PCCRX(r), reg); 3314 } 3315 3316 #ifdef IFPOLL_ENABLE 3317 3318 static void 3319 jme_npoll_status(struct ifnet *ifp) 3320 { 3321 struct jme_softc *sc = ifp->if_softc; 3322 uint32_t status; 3323 3324 ASSERT_SERIALIZED(&sc->jme_serialize); 3325 3326 status = CSR_READ_4(sc, JME_INTR_STATUS); 3327 if (status & INTR_RXQ_DESC_EMPTY) { 3328 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY); 3329 jme_rx_restart(sc, status); 3330 } 3331 } 3332 3333 static void 3334 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3335 { 3336 struct jme_rxdata *rdata = arg; 3337 3338 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3339 3340 jme_rxeof(rdata, cycle); 3341 } 3342 3343 static void 3344 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 3345 { 3346 struct jme_txdata *tdata = arg; 3347 3348 ASSERT_SERIALIZED(&tdata->jme_tx_serialize); 3349 3350 jme_txeof(tdata); 3351 if (!ifq_is_empty(&ifp->if_snd)) 3352 if_devstart(ifp); 3353 } 3354 3355 static void 3356 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3357 { 3358 struct jme_softc *sc = ifp->if_softc; 3359 3360 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3361 3362 if (info) { 3363 int i, off; 3364 3365 info->ifpi_status.status_func = jme_npoll_status; 3366 info->ifpi_status.serializer = &sc->jme_serialize; 3367 3368 off = sc->jme_npoll_txoff; 3369 KKASSERT(off <= ncpus2); 3370 info->ifpi_tx[off].poll_func = jme_npoll_tx; 3371 info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data; 3372 info->ifpi_tx[off].serializer = 3373 &sc->jme_cdata.jme_tx_data.jme_tx_serialize; 3374 3375 off = sc->jme_npoll_rxoff; 3376 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 3377 struct jme_rxdata *rdata = 3378 &sc->jme_cdata.jme_rx_data[i]; 3379 int idx = i + off; 3380 3381 info->ifpi_rx[idx].poll_func = jme_npoll_rx; 3382 info->ifpi_rx[idx].arg = rdata; 3383 info->ifpi_rx[idx].serializer = 3384 &rdata->jme_rx_serialize; 3385 } 3386 3387 if (ifp->if_flags & IFF_RUNNING) 3388 jme_disable_intr(sc); 3389 ifq_set_cpuid(&ifp->if_snd, sc->jme_npoll_txoff); 3390 } else { 3391 if (ifp->if_flags & IFF_RUNNING) 3392 jme_enable_intr(sc); 3393 ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid); 3394 } 3395 } 3396 3397 static int 3398 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3399 { 3400 struct jme_softc *sc = (void *)arg1; 3401 struct ifnet *ifp = &sc->arpcom.ac_if; 3402 int error, off; 3403 3404 off = sc->jme_npoll_rxoff; 3405 error = sysctl_handle_int(oidp, &off, 0, req); 3406 if (error || req->newptr == NULL) 3407 return error; 3408 if (off < 0) 3409 return EINVAL; 3410 3411 ifnet_serialize_all(ifp); 3412 if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) { 3413 error = EINVAL; 3414 } else { 3415 error = 0; 3416 sc->jme_npoll_rxoff = off; 3417 } 3418 ifnet_deserialize_all(ifp); 3419 3420 return error; 3421 } 3422 3423 static int 3424 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3425 { 3426 struct jme_softc *sc = (void *)arg1; 3427 struct ifnet *ifp = &sc->arpcom.ac_if; 3428 int error, off; 3429 3430 off = sc->jme_npoll_txoff; 3431 error = sysctl_handle_int(oidp, &off, 0, req); 3432 if (error || req->newptr == NULL) 3433 return error; 3434 if (off < 0) 3435 return EINVAL; 3436 3437 ifnet_serialize_all(ifp); 3438 if (off >= ncpus2) { 3439 error = EINVAL; 3440 } else { 3441 error = 0; 3442 sc->jme_npoll_txoff = off; 3443 } 3444 ifnet_deserialize_all(ifp); 3445 3446 return error; 3447 } 3448 3449 #endif /* IFPOLL_ENABLE */ 3450 3451 static int 3452 jme_rxring_dma_alloc(struct jme_rxdata *rdata) 3453 { 3454 bus_dmamem_t dmem; 3455 int error, asize; 3456 3457 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN); 3458 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag, 3459 JME_RX_RING_ALIGN, 0, 3460 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3461 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3462 if (error) { 3463 device_printf(rdata->jme_sc->jme_dev, 3464 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx); 3465 return error; 3466 } 3467 rdata->jme_rx_ring_tag = dmem.dmem_tag; 3468 rdata->jme_rx_ring_map = dmem.dmem_map; 3469 rdata->jme_rx_ring = dmem.dmem_addr; 3470 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr; 3471 3472 return 0; 3473 } 3474 3475 static int 3476 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr) 3477 { 3478 if ((paddr & 0xffffffff) == 0) { 3479 /* 3480 * Don't allow lower 32bits of the RX buffer's 3481 * physical address to be 0, else it will break 3482 * hardware pending RSS information delivery 3483 * detection on RX path. 3484 */ 3485 return 1; 3486 } 3487 return 0; 3488 } 3489 3490 static int 3491 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata) 3492 { 3493 bus_addr_t lowaddr; 3494 int i, error; 3495 3496 lowaddr = BUS_SPACE_MAXADDR; 3497 if (JME_ENABLE_HWRSS(rdata->jme_sc)) { 3498 /* jme_rxbuf_dma_filter will be called */ 3499 lowaddr = BUS_SPACE_MAXADDR_32BIT; 3500 } 3501 3502 /* Create tag for Rx buffers. */ 3503 error = bus_dma_tag_create( 3504 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */ 3505 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 3506 lowaddr, /* lowaddr */ 3507 BUS_SPACE_MAXADDR, /* highaddr */ 3508 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */ 3509 MCLBYTES, /* maxsize */ 3510 1, /* nsegments */ 3511 MCLBYTES, /* maxsegsize */ 3512 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */ 3513 &rdata->jme_rx_tag); 3514 if (error) { 3515 device_printf(rdata->jme_sc->jme_dev, 3516 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx); 3517 return error; 3518 } 3519 3520 /* Create DMA maps for Rx buffers. */ 3521 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3522 &rdata->jme_rx_sparemap); 3523 if (error) { 3524 device_printf(rdata->jme_sc->jme_dev, 3525 "could not create %dth spare Rx dmamap.\n", 3526 rdata->jme_rx_idx); 3527 bus_dma_tag_destroy(rdata->jme_rx_tag); 3528 rdata->jme_rx_tag = NULL; 3529 return error; 3530 } 3531 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 3532 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i]; 3533 3534 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3535 &rxd->rx_dmamap); 3536 if (error) { 3537 int j; 3538 3539 device_printf(rdata->jme_sc->jme_dev, 3540 "could not create %dth Rx dmamap " 3541 "for %dth RX ring.\n", i, rdata->jme_rx_idx); 3542 3543 for (j = 0; j < i; ++j) { 3544 rxd = &rdata->jme_rxdesc[j]; 3545 bus_dmamap_destroy(rdata->jme_rx_tag, 3546 rxd->rx_dmamap); 3547 } 3548 bus_dmamap_destroy(rdata->jme_rx_tag, 3549 rdata->jme_rx_sparemap); 3550 bus_dma_tag_destroy(rdata->jme_rx_tag); 3551 rdata->jme_rx_tag = NULL; 3552 return error; 3553 } 3554 } 3555 return 0; 3556 } 3557 3558 static void 3559 jme_rx_intr(struct jme_softc *sc, uint32_t status) 3560 { 3561 int r; 3562 3563 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3564 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3565 3566 if (status & rdata->jme_rx_coal) { 3567 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3568 jme_rxeof(rdata, -1); 3569 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3570 } 3571 } 3572 } 3573 3574 static void 3575 jme_enable_rss(struct jme_softc *sc) 3576 { 3577 uint32_t rssc, ind; 3578 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE]; 3579 int i; 3580 3581 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 || 3582 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4, 3583 ("%s: invalid # of RX rings (%d)", 3584 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt)); 3585 3586 rssc = RSSC_HASH_64_ENTRY; 3587 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP; 3588 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1; 3589 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc); 3590 CSR_WRITE_4(sc, JME_RSSC, rssc); 3591 3592 toeplitz_get_key(key, sizeof(key)); 3593 for (i = 0; i < RSSKEY_NREGS; ++i) { 3594 uint32_t keyreg; 3595 3596 keyreg = RSSKEY_REGVAL(key, i); 3597 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg); 3598 3599 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg); 3600 } 3601 3602 /* 3603 * Create redirect table in following fashion: 3604 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3605 */ 3606 ind = 0; 3607 for (i = 0; i < RSSTBL_REGSIZE; ++i) { 3608 int q; 3609 3610 q = i % sc->jme_cdata.jme_rx_ring_cnt; 3611 ind |= q << (i * 8); 3612 } 3613 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind); 3614 3615 for (i = 0; i < RSSTBL_NREGS; ++i) 3616 CSR_WRITE_4(sc, RSSTBL_REG(i), ind); 3617 } 3618 3619 static void 3620 jme_disable_rss(struct jme_softc *sc) 3621 { 3622 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 3623 } 3624 3625 static void 3626 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3627 { 3628 struct jme_softc *sc = ifp->if_softc; 3629 3630 ifnet_serialize_array_enter(sc->jme_serialize_arr, 3631 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz); 3632 } 3633 3634 static void 3635 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3636 { 3637 struct jme_softc *sc = ifp->if_softc; 3638 3639 ifnet_serialize_array_exit(sc->jme_serialize_arr, 3640 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz); 3641 } 3642 3643 static int 3644 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3645 { 3646 struct jme_softc *sc = ifp->if_softc; 3647 3648 return ifnet_serialize_array_try(sc->jme_serialize_arr, 3649 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz); 3650 } 3651 3652 #ifdef INVARIANTS 3653 3654 static void 3655 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3656 boolean_t serialized) 3657 { 3658 struct jme_softc *sc = ifp->if_softc; 3659 3660 ifnet_serialize_array_assert(sc->jme_serialize_arr, 3661 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, 3662 slz, serialized); 3663 } 3664 3665 #endif /* INVARIANTS */ 3666 3667 static void 3668 jme_msix_try_alloc(device_t dev) 3669 { 3670 struct jme_softc *sc = device_get_softc(dev); 3671 struct jme_msix_data *msix; 3672 int error, i, r, msix_enable, msix_count; 3673 int offset, offset_def; 3674 3675 msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt); 3676 KKASSERT(msix_count <= JME_NMSIX); 3677 3678 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable); 3679 3680 /* 3681 * We leave the 1st MSI-X vector unused, so we 3682 * actually need msix_count + 1 MSI-X vectors. 3683 */ 3684 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1)) 3685 return; 3686 3687 for (i = 0; i < msix_count; ++i) 3688 sc->jme_msix[i].jme_msix_rid = -1; 3689 3690 i = 0; 3691 3692 /* 3693 * Setup status MSI-X 3694 */ 3695 3696 msix = &sc->jme_msix[i++]; 3697 msix->jme_msix_cpuid = 0; 3698 msix->jme_msix_arg = sc; 3699 msix->jme_msix_func = jme_msix_status; 3700 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3701 msix->jme_msix_intrs |= 3702 sc->jme_cdata.jme_rx_data[r].jme_rx_empty; 3703 } 3704 msix->jme_msix_serialize = &sc->jme_serialize; 3705 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts", 3706 device_get_nameunit(dev)); 3707 3708 /* 3709 * Setup TX MSI-X 3710 */ 3711 3712 offset_def = device_get_unit(dev) % ncpus2; 3713 offset = device_getenv_int(dev, "msix.txoff", offset_def); 3714 if (offset >= ncpus2) { 3715 device_printf(dev, "invalid msix.txoff %d, use %d\n", 3716 offset, offset_def); 3717 offset = offset_def; 3718 } 3719 3720 msix = &sc->jme_msix[i++]; 3721 msix->jme_msix_cpuid = offset; 3722 sc->jme_tx_cpuid = msix->jme_msix_cpuid; 3723 msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data; 3724 msix->jme_msix_func = jme_msix_tx; 3725 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO; 3726 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize; 3727 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx", 3728 device_get_nameunit(dev)); 3729 3730 /* 3731 * Setup RX MSI-X 3732 */ 3733 3734 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) { 3735 offset = 0; 3736 } else { 3737 offset_def = (sc->jme_cdata.jme_rx_ring_cnt * 3738 device_get_unit(dev)) % ncpus2; 3739 3740 offset = device_getenv_int(dev, "msix.rxoff", offset_def); 3741 if (offset >= ncpus2 || 3742 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) { 3743 device_printf(dev, "invalid msix.rxoff %d, use %d\n", 3744 offset, offset_def); 3745 offset = offset_def; 3746 } 3747 } 3748 3749 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3750 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3751 3752 msix = &sc->jme_msix[i++]; 3753 msix->jme_msix_cpuid = r + offset; 3754 KKASSERT(msix->jme_msix_cpuid < ncpus2); 3755 msix->jme_msix_arg = rdata; 3756 msix->jme_msix_func = jme_msix_rx; 3757 msix->jme_msix_intrs = rdata->jme_rx_coal; 3758 msix->jme_msix_serialize = &rdata->jme_rx_serialize; 3759 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), 3760 "%s rx%d", device_get_nameunit(dev), r); 3761 } 3762 3763 KKASSERT(i == msix_count); 3764 3765 error = pci_setup_msix(dev); 3766 if (error) 3767 return; 3768 3769 /* Setup jme_msix_cnt early, so we could cleanup */ 3770 sc->jme_msix_cnt = msix_count; 3771 3772 for (i = 0; i < msix_count; ++i) { 3773 msix = &sc->jme_msix[i]; 3774 3775 msix->jme_msix_vector = i + 1; 3776 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector, 3777 &msix->jme_msix_rid, msix->jme_msix_cpuid); 3778 if (error) 3779 goto back; 3780 3781 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3782 &msix->jme_msix_rid, RF_ACTIVE); 3783 if (msix->jme_msix_res == NULL) { 3784 error = ENOMEM; 3785 goto back; 3786 } 3787 } 3788 3789 for (i = 0; i < JME_INTR_CNT; ++i) { 3790 uint32_t intr_mask = (1 << i); 3791 int x; 3792 3793 if ((JME_INTRS & intr_mask) == 0) 3794 continue; 3795 3796 for (x = 0; x < msix_count; ++x) { 3797 msix = &sc->jme_msix[x]; 3798 if (msix->jme_msix_intrs & intr_mask) { 3799 int reg, shift; 3800 3801 reg = i / JME_MSINUM_FACTOR; 3802 KKASSERT(reg < JME_MSINUM_CNT); 3803 3804 shift = (i % JME_MSINUM_FACTOR) * 4; 3805 3806 sc->jme_msinum[reg] |= 3807 (msix->jme_msix_vector << shift); 3808 3809 break; 3810 } 3811 } 3812 } 3813 3814 if (bootverbose) { 3815 for (i = 0; i < JME_MSINUM_CNT; ++i) { 3816 device_printf(dev, "MSINUM%d: %#x\n", i, 3817 sc->jme_msinum[i]); 3818 } 3819 } 3820 3821 pci_enable_msix(dev); 3822 sc->jme_irq_type = PCI_INTR_TYPE_MSIX; 3823 3824 back: 3825 if (error) 3826 jme_msix_free(dev); 3827 } 3828 3829 static int 3830 jme_intr_alloc(device_t dev) 3831 { 3832 struct jme_softc *sc = device_get_softc(dev); 3833 u_int irq_flags; 3834 3835 jme_msix_try_alloc(dev); 3836 3837 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3838 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable, 3839 &sc->jme_irq_rid, &irq_flags); 3840 3841 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3842 &sc->jme_irq_rid, irq_flags); 3843 if (sc->jme_irq_res == NULL) { 3844 device_printf(dev, "can't allocate irq\n"); 3845 return ENXIO; 3846 } 3847 } 3848 return 0; 3849 } 3850 3851 static void 3852 jme_msix_free(device_t dev) 3853 { 3854 struct jme_softc *sc = device_get_softc(dev); 3855 int i; 3856 3857 KKASSERT(sc->jme_msix_cnt > 1); 3858 3859 for (i = 0; i < sc->jme_msix_cnt; ++i) { 3860 struct jme_msix_data *msix = &sc->jme_msix[i]; 3861 3862 if (msix->jme_msix_res != NULL) { 3863 bus_release_resource(dev, SYS_RES_IRQ, 3864 msix->jme_msix_rid, msix->jme_msix_res); 3865 msix->jme_msix_res = NULL; 3866 } 3867 if (msix->jme_msix_rid >= 0) { 3868 pci_release_msix_vector(dev, msix->jme_msix_rid); 3869 msix->jme_msix_rid = -1; 3870 } 3871 } 3872 pci_teardown_msix(dev); 3873 } 3874 3875 static void 3876 jme_intr_free(device_t dev) 3877 { 3878 struct jme_softc *sc = device_get_softc(dev); 3879 3880 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3881 if (sc->jme_irq_res != NULL) { 3882 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid, 3883 sc->jme_irq_res); 3884 } 3885 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI) 3886 pci_release_msi(dev); 3887 } else { 3888 jme_msix_free(dev); 3889 } 3890 } 3891 3892 static void 3893 jme_msix_tx(void *xtdata) 3894 { 3895 struct jme_txdata *tdata = xtdata; 3896 struct jme_softc *sc = tdata->jme_sc; 3897 struct ifnet *ifp = &sc->arpcom.ac_if; 3898 3899 ASSERT_SERIALIZED(&tdata->jme_tx_serialize); 3900 3901 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO); 3902 3903 CSR_WRITE_4(sc, JME_INTR_STATUS, 3904 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP); 3905 3906 if (ifp->if_flags & IFF_RUNNING) { 3907 jme_txeof(tdata); 3908 if (!ifq_is_empty(&ifp->if_snd)) 3909 if_devstart(ifp); 3910 } 3911 3912 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO); 3913 } 3914 3915 static void 3916 jme_msix_rx(void *xrdata) 3917 { 3918 struct jme_rxdata *rdata = xrdata; 3919 struct jme_softc *sc = rdata->jme_sc; 3920 struct ifnet *ifp = &sc->arpcom.ac_if; 3921 3922 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3923 3924 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal); 3925 3926 CSR_WRITE_4(sc, JME_INTR_STATUS, 3927 rdata->jme_rx_coal | rdata->jme_rx_comp); 3928 3929 if (ifp->if_flags & IFF_RUNNING) 3930 jme_rxeof(rdata, -1); 3931 3932 CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal); 3933 } 3934 3935 static void 3936 jme_msix_status(void *xsc) 3937 { 3938 struct jme_softc *sc = xsc; 3939 struct ifnet *ifp = &sc->arpcom.ac_if; 3940 uint32_t status; 3941 3942 ASSERT_SERIALIZED(&sc->jme_serialize); 3943 3944 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY); 3945 3946 status = CSR_READ_4(sc, JME_INTR_STATUS); 3947 3948 if (status & INTR_RXQ_DESC_EMPTY) { 3949 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY); 3950 if (ifp->if_flags & IFF_RUNNING) 3951 jme_rx_restart(sc, status); 3952 } 3953 3954 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY); 3955 } 3956 3957 static void 3958 jme_rx_restart(struct jme_softc *sc, uint32_t status) 3959 { 3960 int i; 3961 3962 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 3963 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 3964 3965 if (status & rdata->jme_rx_empty) { 3966 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3967 jme_rxeof(rdata, -1); 3968 #ifdef JME_RSS_DEBUG 3969 rdata->jme_rx_emp++; 3970 #endif 3971 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3972 } 3973 } 3974 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 3975 RXCSR_RXQ_START); 3976 } 3977 3978 static void 3979 jme_set_msinum(struct jme_softc *sc) 3980 { 3981 int i; 3982 3983 for (i = 0; i < JME_MSINUM_CNT; ++i) 3984 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]); 3985 } 3986 3987 static int 3988 jme_intr_setup(device_t dev) 3989 { 3990 struct jme_softc *sc = device_get_softc(dev); 3991 int error; 3992 3993 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 3994 return jme_msix_setup(dev); 3995 3996 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, 3997 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize); 3998 if (error) { 3999 device_printf(dev, "could not set up interrupt handler.\n"); 4000 return error; 4001 } 4002 sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res); 4003 4004 return 0; 4005 } 4006 4007 static void 4008 jme_intr_teardown(device_t dev) 4009 { 4010 struct jme_softc *sc = device_get_softc(dev); 4011 4012 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 4013 jme_msix_teardown(dev, sc->jme_msix_cnt); 4014 else 4015 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle); 4016 } 4017 4018 static int 4019 jme_msix_setup(device_t dev) 4020 { 4021 struct jme_softc *sc = device_get_softc(dev); 4022 int x; 4023 4024 for (x = 0; x < sc->jme_msix_cnt; ++x) { 4025 struct jme_msix_data *msix = &sc->jme_msix[x]; 4026 int error; 4027 4028 error = bus_setup_intr_descr(dev, msix->jme_msix_res, 4029 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg, 4030 &msix->jme_msix_handle, msix->jme_msix_serialize, 4031 msix->jme_msix_desc); 4032 if (error) { 4033 device_printf(dev, "could not set up %s " 4034 "interrupt handler.\n", msix->jme_msix_desc); 4035 jme_msix_teardown(dev, x); 4036 return error; 4037 } 4038 } 4039 return 0; 4040 } 4041 4042 static void 4043 jme_msix_teardown(device_t dev, int msix_count) 4044 { 4045 struct jme_softc *sc = device_get_softc(dev); 4046 int x; 4047 4048 for (x = 0; x < msix_count; ++x) { 4049 struct jme_msix_data *msix = &sc->jme_msix[x]; 4050 4051 bus_teardown_intr(dev, msix->jme_msix_res, 4052 msix->jme_msix_handle); 4053 } 4054 } 4055 4056 static void 4057 jme_serialize_skipmain(struct jme_softc *sc) 4058 { 4059 lwkt_serialize_array_enter(sc->jme_serialize_arr, 4060 sc->jme_serialize_cnt, 1); 4061 } 4062 4063 static void 4064 jme_deserialize_skipmain(struct jme_softc *sc) 4065 { 4066 lwkt_serialize_array_exit(sc->jme_serialize_arr, 4067 sc->jme_serialize_cnt, 1); 4068 } 4069 4070 static void 4071 jme_enable_intr(struct jme_softc *sc) 4072 { 4073 int i; 4074 4075 for (i = 0; i < sc->jme_serialize_cnt; ++i) 4076 lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]); 4077 4078 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 4079 } 4080 4081 static void 4082 jme_disable_intr(struct jme_softc *sc) 4083 { 4084 int i; 4085 4086 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 4087 4088 for (i = 0; i < sc->jme_serialize_cnt; ++i) 4089 lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]); 4090 } 4091