/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */

#include "opt_ifpoll.h"
#include "opt_jme.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

#define JME_TICK_CPUID      0   /* DO NOT CHANGE THIS */
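/*
 * Fixed indices of the TX and RX serializers in sc->jme_serialize_arr;
 * jme_attach() asserts this layout when it builds the array.
 */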
#define JME_TX_SERIALIZE    1
#define JME_RX_SERIALIZE    2

#define JME_CSUM_FEATURES   (CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
    if ((sc)->jme_rss_debug >= (lvl)) \
        if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else   /* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)  ((void)0)
#endif  /* JME_RSS_DEBUG */

static int  jme_probe(device_t);
static int  jme_attach(device_t);
static int  jme_detach(device_t);
static int  jme_shutdown(device_t);
static int  jme_suspend(device_t);
static int  jme_resume(device_t);

static int  jme_miibus_readreg(device_t, int, int);
static int  jme_miibus_writereg(device_t, int, int, int);
static void jme_miibus_statchg(device_t);

static void jme_init(void *);
static int  jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void jme_start(struct ifnet *);
static void jme_watchdog(struct ifnet *);
static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int  jme_mediachange(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void jme_npoll(struct ifnet *, struct ifpoll_info *);
static void jme_npoll_status(struct ifnet *);
static void jme_npoll_rx(struct ifnet *, void *, int);
static void jme_npoll_tx(struct ifnet *, void *, int);
#endif
static void jme_serialize(struct ifnet *, enum ifnet_serialize);
static void jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int  jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
        boolean_t);
#endif

static void jme_intr(void *);
static void jme_msix_tx(void *);
static void jme_msix_rx(void *);
static void jme_msix_status(void *);
static void jme_txeof(struct jme_txdata *);
static void jme_rxeof(struct jme_rxdata *, int);
static void jme_rx_intr(struct jme_softc *, uint32_t);
static void jme_enable_intr(struct jme_softc *);
static void jme_disable_intr(struct jme_softc *);
static void jme_rx_restart(struct jme_softc *, uint32_t);

static int  jme_msix_setup(device_t);
static void jme_msix_teardown(device_t, int);
static int  jme_intr_setup(device_t);
static void jme_intr_teardown(device_t);
static void jme_msix_try_alloc(device_t);
static void jme_msix_free(device_t);
static int  jme_intr_alloc(device_t);
static void jme_intr_free(device_t);
static int  jme_dma_alloc(struct jme_softc *);
static void jme_dma_free(struct jme_softc *);
static int  jme_init_rx_ring(struct jme_rxdata *);
static void jme_init_tx_ring(struct jme_txdata *);
static void jme_init_ssb(struct jme_softc *);
static int  jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int  jme_encap(struct jme_txdata *, struct mbuf **, int *);
static void jme_rxpkt(struct jme_rxdata *);
static int  jme_rxring_dma_alloc(struct jme_rxdata *);
static int  jme_rxbuf_dma_alloc(struct jme_rxdata *);
static int  jme_rxbuf_dma_filter(void *, bus_addr_t);

static void jme_tick(void *);
static void jme_stop(struct jme_softc *);
static void jme_reset(struct jme_softc *);
static void jme_set_msinum(struct jme_softc *);
static void jme_set_vlan(struct jme_softc *);
static void jme_set_filter(struct jme_softc *);
static void jme_stop_tx(struct jme_softc *);
static void jme_stop_rx(struct jme_softc *);
static void jme_mac_config(struct jme_softc *);
static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int  jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int  jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void jme_setwol(struct jme_softc *);
static void jme_setlinkspeed(struct jme_softc *);
#endif
static void jme_set_tx_coal(struct jme_softc *);
static void jme_set_rx_coal(struct jme_softc *);
static void jme_enable_rss(struct jme_softc *);
static void jme_disable_rss(struct jme_softc *);
static void jme_serialize_skipmain(struct jme_softc *);
static void jme_deserialize_skipmain(struct jme_softc *);

static void jme_sysctl_node(struct jme_softc *);
static int  jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int  jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int  jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int  jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int  jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int  jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
    uint16_t    jme_vendorid;
    uint16_t    jme_deviceid;
    uint32_t    jme_caps;
    const char  *jme_name;
} jme_devs[] = {
    { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
      JME_CAP_JUMBO,
      "JMicron Inc, JMC250 Gigabit Ethernet" },
    { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
      JME_CAP_FASTETH,
      "JMicron Inc, JMC260 Fast Ethernet" },
    { 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
    /* Device interface. */
    DEVMETHOD(device_probe,     jme_probe),
    DEVMETHOD(device_attach,    jme_attach),
    DEVMETHOD(device_detach,    jme_detach),
    DEVMETHOD(device_shutdown,  jme_shutdown),
    DEVMETHOD(device_suspend,   jme_suspend),
    DEVMETHOD(device_resume,    jme_resume),

    /* Bus interface. */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),

    /* MII interface. */
    DEVMETHOD(miibus_readreg,   jme_miibus_readreg),
    DEVMETHOD(miibus_writereg,  jme_miibus_writereg),
    DEVMETHOD(miibus_statchg,   jme_miibus_statchg),

    { NULL, NULL }
};

static driver_t jme_driver = {
    "jme",
    jme_methods,
    sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
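/*
 * Per-RX-ring interrupt status bits: coalescing, completion and
 * descriptor-empty indications, indexed by RX ring number.
 */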
static const struct {
    uint32_t    jme_coal;
    uint32_t    jme_comp;
    uint32_t    jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
    { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
      INTR_RXQ0_DESC_EMPTY },
    { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
      INTR_RXQ1_DESC_EMPTY },
    { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
      INTR_RXQ2_DESC_EMPTY },
    { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
      INTR_RXQ3_DESC_EMPTY }
};

static int  jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int  jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int  jme_rx_ring_count = 0;
static int  jme_msi_enable = 1;
static int  jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
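/*
 * Post an RX descriptor to the hardware: reset its buffer length and
 * address, then hand ownership back to the chip via JME_RD_OWN.
 */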
static __inline void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
    struct jme_desc *desc;

    desc = rxd->rx_desc;
    desc->buflen = htole32(MCLBYTES);
    desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
    desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
    desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
    struct jme_softc *sc = device_get_softc(dev);
    uint32_t val;
    int i;

    /* For FPGA version, PHY address 0 should be ignored. */
    if (sc->jme_caps & JME_CAP_FPGA) {
        if (phy == 0)
            return (0);
    } else {
        if (sc->jme_phyaddr != phy)
            return (0);
    }

    CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
        SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

    for (i = JME_PHY_TIMEOUT; i > 0; i--) {
        DELAY(1);
        if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
            break;
    }
    if (i == 0) {
        device_printf(sc->jme_dev, "phy read timeout: "
            "phy %d, reg %d\n", phy, reg);
        return (0);
    }

    return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct jme_softc *sc = device_get_softc(dev);
    int i;

    /* For FPGA version, PHY address 0 should be ignored. */
    if (sc->jme_caps & JME_CAP_FPGA) {
        if (phy == 0)
            return (0);
    } else {
        if (sc->jme_phyaddr != phy)
            return (0);
    }

    CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
        ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
        SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

    for (i = JME_PHY_TIMEOUT; i > 0; i--) {
        DELAY(1);
        if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
            break;
    }
    if (i == 0) {
        device_printf(sc->jme_dev, "phy write timeout: "
            "phy %d, reg %d\n", phy, reg);
    }

    return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
    struct jme_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
    struct mii_data *mii;
    struct jme_txdesc *txd;
    bus_addr_t paddr;
    int i, r;

    if (sc->jme_in_tick)
        jme_serialize_skipmain(sc);
    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        goto done;

    mii = device_get_softc(sc->jme_miibus);

    sc->jme_has_link = FALSE;
    if ((mii->mii_media_status & IFM_AVALID) != 0) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            sc->jme_has_link = TRUE;
            break;
        case IFM_1000_T:
            if (sc->jme_caps & JME_CAP_FASTETH)
                break;
            sc->jme_has_link = TRUE;
            break;
        default:
            break;
        }
    }

    /*
     * Disabling the Rx/Tx MACs has the side effect of resetting
     * the JME_TXNDA/JME_RXNDA registers to the first address of
     * the Tx/Rx descriptors, so the driver must reset its internal
     * producer/consumer pointers and reclaim any allocated
     * resources.  Note, just saving the values of the JME_TXNDA
     * and JME_RXNDA registers before stopping the MAC and restoring
     * them afterwards is not sufficient to guarantee a correct MAC
     * state, because stopping MAC operation can take a while and
     * the hardware might have updated JME_TXNDA/JME_RXNDA during
     * the stop operation.
     */

    /* Disable interrupts */
    CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

    /* Stop driver */
    ifp->if_flags &= ~IFF_RUNNING;
    ifq_clr_oactive(&ifp->if_snd);
    ifp->if_timer = 0;
    callout_stop(&sc->jme_tick_ch);

    /* Stop receiver/transmitter. */
    jme_stop_rx(sc);
    jme_stop_tx(sc);

    for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

        jme_rxeof(rdata, -1);
        if (rdata->jme_rxhead != NULL)
            m_freem(rdata->jme_rxhead);
        JME_RXCHAIN_RESET(rdata);

        /*
         * Reuse configured Rx descriptors and reset
         * producer/consumer index.
         */
        rdata->jme_rx_cons = 0;
    }
    if (JME_ENABLE_HWRSS(sc))
        jme_enable_rss(sc);
    else
        jme_disable_rss(sc);

    jme_txeof(tdata);
    if (tdata->jme_tx_cnt != 0) {
        /* Remove queued packets for transmit. */
        for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
            txd = &tdata->jme_txdesc[i];
            if (txd->tx_m != NULL) {
                bus_dmamap_unload(tdata->jme_tx_tag,
                    txd->tx_dmamap);
                m_freem(txd->tx_m);
                txd->tx_m = NULL;
                txd->tx_ndesc = 0;
                ifp->if_oerrors++;
            }
        }
    }
    jme_init_tx_ring(tdata);

    /* Initialize shadow status block. */
    jme_init_ssb(sc);

    /* Program MAC with resolved speed/duplex/flow-control. */
    if (sc->jme_has_link) {
        jme_mac_config(sc);

        CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

        /* Set Tx ring address to the hardware. */
        paddr = tdata->jme_tx_ring_paddr;
        CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
        CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
            CSR_WRITE_4(sc, JME_RXCSR,
                sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

            /* Set Rx ring address to the hardware. */
            paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
            CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
            CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
        }

        /* Restart receiver/transmitter. */
        CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
            RXCSR_RXQ_START);
        CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
    }

    ifp->if_flags |= IFF_RUNNING;
    ifq_clr_oactive(&ifp->if_snd);
    callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
        JME_TICK_CPUID);

#ifdef IFPOLL_ENABLE
    if (!(ifp->if_flags & IFF_NPOLLING))
#endif
    /* Reenable interrupts. */
    CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
    if (sc->jme_in_tick)
        jme_deserialize_skipmain(sc);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct jme_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->jme_miibus);

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    mii_pollstat(mii);
    ifmr->ifm_status = mii->mii_media_status;
    ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
    struct jme_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->jme_miibus);
    int error;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    if (mii->mii_instance != 0) {
        struct mii_softc *miisc;

        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
            mii_phy_reset(miisc);
    }
    error = mii_mediachg(mii);

    return (error);
}

static int
jme_probe(device_t dev)
{
    const struct jme_dev *sp;
    uint16_t vid, did;

    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);
    for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
        if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
            struct jme_softc *sc = device_get_softc(dev);

            sc->jme_caps = sp->jme_caps;
            device_set_desc(dev, sp->jme_name);
            return (0);
        }
    }
    return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
    uint32_t reg;
    int i;

    *val = 0;
    for (i = JME_TIMEOUT; i > 0; i--) {
        reg = CSR_READ_4(sc, JME_SMBCSR);
        if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
            break;
        DELAY(1);
    }

    if (i == 0) {
        device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
        return (ETIMEDOUT);
    }

    reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
    CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
    for (i = JME_TIMEOUT; i > 0; i--) {
        DELAY(1);
        reg = CSR_READ_4(sc, JME_SMBINTF);
        if ((reg & SMBINTF_CMD_TRIGGER) == 0)
            break;
    }

    if (i == 0) {
        device_printf(sc->jme_dev, "EEPROM read timeout!\n");
        return (ETIMEDOUT);
    }

    reg = CSR_READ_4(sc, JME_SMBINTF);
    *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

    return (0);
}
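/*
 * Try to recover the station address from the EEPROM descriptor list:
 * verify the two signature bytes, then walk the descriptors collecting
 * the bytes that target the JME_PAR0..JME_PAR5 registers.  Returns
 * ENOENT unless all six address bytes are found.
 */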
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
    uint8_t fup, reg, val;
    uint32_t offset;
    int match;

    offset = 0;
    if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
        fup != JME_EEPROM_SIG0)
        return (ENOENT);
    if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
        fup != JME_EEPROM_SIG1)
        return (ENOENT);
    match = 0;
    do {
        if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
            break;
        if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
            (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
            if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
                break;
            if (reg >= JME_PAR0 &&
                reg < JME_PAR0 + ETHER_ADDR_LEN) {
                if (jme_eeprom_read_byte(sc, offset + 2,
                    &val) != 0)
                    break;
                eaddr[reg - JME_PAR0] = val;
                match++;
            }
        }
        /* Check for the end of EEPROM descriptor. */
        if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
            break;
        /* Try next eeprom descriptor. */
        offset += JME_EEPROM_DESC_BYTES;
    } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

    if (match == ETHER_ADDR_LEN)
        return (0);

    return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
    uint32_t par0, par1;

    /* Read station address. */
    par0 = CSR_READ_4(sc, JME_PAR0);
    par1 = CSR_READ_4(sc, JME_PAR1);
    par1 &= 0xFFFF;
    if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
        device_printf(sc->jme_dev,
            "generating fake ethernet address.\n");
        par0 = karc4random();
        /* Set OUI to JMicron. */
        eaddr[0] = 0x00;
        eaddr[1] = 0x1B;
        eaddr[2] = 0x8C;
        eaddr[3] = (par0 >> 16) & 0xff;
        eaddr[4] = (par0 >> 8) & 0xff;
        eaddr[5] = par0 & 0xff;
    } else {
        eaddr[0] = (par0 >> 0) & 0xFF;
        eaddr[1] = (par0 >> 8) & 0xFF;
        eaddr[2] = (par0 >> 16) & 0xFF;
        eaddr[3] = (par0 >> 24) & 0xFF;
        eaddr[4] = (par1 >> 0) & 0xFF;
        eaddr[5] = (par1 >> 8) & 0xFF;
    }
}

static int
jme_attach(device_t dev)
{
    struct jme_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;
    uint32_t reg;
    uint16_t did;
    uint8_t pcie_ptr, rev;
    int error = 0, i, j, rx_desc_cnt, coal_max;
    uint8_t eaddr[ETHER_ADDR_LEN];
#ifdef IFPOLL_ENABLE
    int offset, offset_def;
#endif

    /*
     * Initialize serializers
     */
    lwkt_serialize_init(&sc->jme_serialize);
    lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
    for (i = 0; i < JME_NRXRING_MAX; ++i) {
        lwkt_serialize_init(
            &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
    }

    /*
     * Get # of RX ring descriptors
     */
    rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
        jme_rx_desc_count);
    rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
    if (rx_desc_cnt > JME_NDESC_MAX)
        rx_desc_cnt = JME_NDESC_MAX;

    /*
     * Get # of TX ring descriptors
     */
    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
        device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
        roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN);
    if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
        sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;

    /*
     * Get # of RX rings
     */
    sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
        jme_rx_ring_count);
    sc->jme_cdata.jme_rx_ring_cnt =
        if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

    /*
     * Initialize serializer array
     */
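    /*
     * The layout is fixed: [0] is the main serializer,
     * [JME_TX_SERIALIZE] the TX serializer, and the per-RX-ring
     * serializers follow from [JME_RX_SERIALIZE] on.  The KKASSERTs
     * below pin this layout, which jme_serialize()/jme_deserialize()
     * rely on.
     */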
    i = 0;
    sc->jme_serialize_arr[i++] = &sc->jme_serialize;

    KKASSERT(i == JME_TX_SERIALIZE);
    sc->jme_serialize_arr[i++] =
        &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

    KKASSERT(i == JME_RX_SERIALIZE);
    for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
        sc->jme_serialize_arr[i++] =
            &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
    }
    KKASSERT(i <= JME_NSERIALIZE);
    sc->jme_serialize_cnt = i;

    /*
     * Setup TX ring specific data
     */
    sc->jme_cdata.jme_tx_data.jme_sc = sc;

    /*
     * Setup RX rings specific data
     */
    for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

        rdata->jme_sc = sc;
        rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
        rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
        rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
        rdata->jme_rx_idx = i;
        rdata->jme_rx_desc_cnt = rx_desc_cnt;
    }

    sc->jme_dev = dev;
    sc->jme_lowaddr = BUS_SPACE_MAXADDR;

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
    if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
        uint32_t irq, mem;

        irq = pci_read_config(dev, PCIR_INTLINE, 4);
        mem = pci_read_config(dev, JME_PCIR_BAR, 4);

        device_printf(dev, "chip is in D%d power mode "
            "-- setting to D0\n", pci_get_powerstate(dev));

        pci_set_powerstate(dev, PCI_POWERSTATE_D0);

        pci_write_config(dev, PCIR_INTLINE, irq, 4);
        pci_write_config(dev, JME_PCIR_BAR, mem, 4);
    }
#endif  /* !BURN_BRIDGES */

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate IO memory
     *
     * JMC250 supports both memory mapped and I/O register space
     * access.  Because I/O register access should use different
     * BARs to access registers, it is a waste of time to use I/O
     * register space access.  JMC250 uses 16K to map the entire
     * memory space.
     */
    sc->jme_mem_rid = JME_PCIR_BAR;
    sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->jme_mem_rid, RF_ACTIVE);
    if (sc->jme_mem_res == NULL) {
        device_printf(dev, "can't allocate IO memory\n");
        return ENXIO;
    }
    sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
    sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

    /*
     * Allocate IRQ
     */
    error = jme_intr_alloc(dev);
    if (error)
        goto fail;

    /*
     * Extract revisions
     */
    reg = CSR_READ_4(sc, JME_CHIPMODE);
    if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
        CHIPMODE_NOT_FPGA) {
        sc->jme_caps |= JME_CAP_FPGA;
        if (bootverbose) {
            device_printf(dev, "FPGA revision: 0x%04x\n",
                (reg & CHIPMODE_FPGA_REV_MASK) >>
                CHIPMODE_FPGA_REV_SHIFT);
        }
    }

    /* NOTE: FM revision is put in the upper 4 bits */
    rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
    rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
    if (bootverbose)
        device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

    did = pci_get_device(dev);
    switch (did) {
    case PCI_PRODUCT_JMICRON_JMC250:
        if (rev == JME_REV1_A2)
            sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
        break;

    case PCI_PRODUCT_JMICRON_JMC260:
        if (rev == JME_REV2)
            sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
        break;

    default:
        panic("unknown device id 0x%04x", did);
    }
    if (rev >= JME_REV2) {
        sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
        sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
            GHC_TXMAC_CLKSRC_1000;
    }

    /* Reset the ethernet controller. */
    jme_reset(sc);

    /* Map MSI/MSI-X vectors */
    jme_set_msinum(sc);

    /* Get station address. */
    reg = CSR_READ_4(sc, JME_SMBCSR);
    if (reg & SMBCSR_EEPROM_PRESENT)
        error = jme_eeprom_macaddr(sc, eaddr);
    if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
        if (error != 0 && (bootverbose)) {
            device_printf(dev, "ethernet hardware address "
                "not found in EEPROM.\n");
        }
        jme_reg_macaddr(sc, eaddr);
    }

    /*
     * Save PHY address.
     * The integrated JR0211 has a fixed PHY address, whereas the
     * FPGA version requires PHY probing to get the correct address.
     */
    if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
        sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
            GPREG0_PHY_ADDR_MASK;
        if (bootverbose) {
            device_printf(dev, "PHY is at address %d.\n",
                sc->jme_phyaddr);
        }
    } else {
        sc->jme_phyaddr = 0;
    }

    /* Set max allowable DMA size. */
    pcie_ptr = pci_get_pciecap_ptr(dev);
    if (pcie_ptr != 0) {
        uint16_t ctrl;

        sc->jme_caps |= JME_CAP_PCIE;
        ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
        if (bootverbose) {
            device_printf(dev, "Read request size : %d bytes.\n",
                128 << ((ctrl >> 12) & 0x07));
            device_printf(dev, "TLP payload size : %d bytes.\n",
                128 << ((ctrl >> 5) & 0x07));
        }
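        /*
         * Match the TX DMA burst size to the PCIe maximum read
         * request size; RX always uses 128-byte DMA bursts.
         */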
        switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
        case PCIEM_DEVCTL_MAX_READRQ_128:
            sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
            break;
        case PCIEM_DEVCTL_MAX_READRQ_256:
            sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
            break;
        default:
            sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
            break;
        }
        sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
    } else {
        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
        sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
    }

#ifdef notyet
    if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
        sc->jme_caps |= JME_CAP_PMCAP;
#endif

#ifdef IFPOLL_ENABLE
    /*
     * NPOLLING RX CPU offset
     */
    if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
        offset = 0;
    } else {
        offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
            device_get_unit(dev)) % ncpus2;
        offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
        if (offset >= ncpus2 ||
            offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
            device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
                offset, offset_def);
            offset = offset_def;
        }
    }
    sc->jme_npoll_rxoff = offset;

    /*
     * NPOLLING TX CPU offset
     */
    offset_def = sc->jme_npoll_rxoff;
    offset = device_getenv_int(dev, "npoll.txoff", offset_def);
    if (offset >= ncpus2) {
        device_printf(dev, "invalid npoll.txoff %d, use %d\n",
            offset, offset_def);
        offset = offset_def;
    }
    sc->jme_npoll_txoff = offset;
#endif

    /*
     * Set default coalesce values
     */
    sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
    sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
    sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
    sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

    /*
     * Adjust coalesce values, in case the number of TX/RX
     * descriptors is set to a small value by the user.
     *
     * NOTE: coal_max will not be zero, since the number of
     * descriptors must be aligned to JME_NDESC_ALIGN
     * (16 currently).
     */
    coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
    if (coal_max < sc->jme_tx_coal_pkt)
        sc->jme_tx_coal_pkt = coal_max;

    coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
    if (coal_max < sc->jme_rx_coal_pkt)
        sc->jme_rx_coal_pkt = coal_max;
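    /*
     * Write the TX queue head pointer to the hardware only once
     * every 16 queued segments; see the kick logic in jme_start().
     */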
    sc->jme_cdata.jme_tx_data.jme_tx_wreg = 16;

    /*
     * Create sysctl tree
     */
    jme_sysctl_node(sc);

    /* Allocate DMA stuffs */
    error = jme_dma_alloc(sc);
    if (error)
        goto fail;

    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init = jme_init;
    ifp->if_ioctl = jme_ioctl;
    ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
    ifp->if_npoll = jme_npoll;
#endif
    ifp->if_watchdog = jme_watchdog;
    ifp->if_serialize = jme_serialize;
    ifp->if_deserialize = jme_deserialize;
    ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
    ifp->if_serialize_assert = jme_serialize_assert;
#endif
    ifq_set_maxlen(&ifp->if_snd,
        sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
    ifq_set_ready(&ifp->if_snd);

    /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
    ifp->if_capabilities = IFCAP_HWCSUM |
                IFCAP_TSO |
                IFCAP_VLAN_MTU |
                IFCAP_VLAN_HWTAGGING;
    if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
        ifp->if_capabilities |= IFCAP_RSS;
    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Disable TXCSUM by default to improve bulk data
     * transmit performance (+20Mbps improvement).
     */
    ifp->if_capenable &= ~IFCAP_TXCSUM;

    if (ifp->if_capenable & IFCAP_TXCSUM)
        ifp->if_hwassist |= JME_CSUM_FEATURES;
    ifp->if_hwassist |= CSUM_TSO;

    /* Set up MII bus. */
    error = mii_phy_probe(dev, &sc->jme_miibus,
        jme_mediachange, jme_mediastatus);
    if (error) {
        device_printf(dev, "no PHY found!\n");
        goto fail;
    }

    /*
     * Save PHYADDR for FPGA mode PHY.
     */
    if (sc->jme_caps & JME_CAP_FPGA) {
        struct mii_data *mii = device_get_softc(sc->jme_miibus);

        if (mii->mii_instance != 0) {
            struct mii_softc *miisc;

            LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
                if (miisc->mii_phy != 0) {
                    sc->jme_phyaddr = miisc->mii_phy;
                    break;
                }
            }
            if (sc->jme_phyaddr != 0) {
                device_printf(sc->jme_dev,
                    "FPGA PHY is at %d\n", sc->jme_phyaddr);
                /* vendor magic. */
                jme_miibus_writereg(dev, sc->jme_phyaddr,
                    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

                /* XXX should we clear JME_WA_EXTFIFO */
            }
        }
    }

    ether_ifattach(ifp, eaddr, NULL);

    /* Tell the upper layer(s) we support long frames. */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    error = jme_intr_setup(dev);
    if (error) {
        ether_ifdetach(ifp);
        goto fail;
    }

    return 0;
fail:
    jme_detach(dev);
    return (error);
}

static int
jme_detach(device_t dev)
{
    struct jme_softc *sc = device_get_softc(dev);

    if (device_is_attached(dev)) {
        struct ifnet *ifp = &sc->arpcom.ac_if;

        ifnet_serialize_all(ifp);
        jme_stop(sc);
        jme_intr_teardown(dev);
        ifnet_deserialize_all(ifp);

        ether_ifdetach(ifp);
    }

    if (sc->jme_sysctl_tree != NULL)
        sysctl_ctx_free(&sc->jme_sysctl_ctx);

    if (sc->jme_miibus != NULL)
        device_delete_child(dev, sc->jme_miibus);
    bus_generic_detach(dev);

    jme_intr_free(dev);

    if (sc->jme_mem_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
            sc->jme_mem_res);
    }

    jme_dma_free(sc);

    return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
#ifdef JME_RSS_DEBUG
    int r;
#endif

    sysctl_ctx_init(&sc->jme_sysctl_ctx);
    sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(sc->jme_dev),
        CTLFLAG_RD, 0, "");
    if (sc->jme_sysctl_tree == NULL) {
        device_printf(sc->jme_dev, "can't add sysctl node\n");
        return;
    }

    SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

    SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

    SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

    SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

    SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "rx_desc_count", CTLFLAG_RD,
        &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
        0, "RX desc count");
    SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "tx_desc_count", CTLFLAG_RD,
        &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
        0, "TX desc count");
    SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "rx_ring_count", CTLFLAG_RD,
        &sc->jme_cdata.jme_rx_ring_cnt,
        0, "RX ring count");
    SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "tx_wreg", CTLFLAG_RW,
        &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
        "# of segments before writing to hardware register");

#ifdef JME_RSS_DEBUG
    SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
        0, "RSS debug level");
    for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
        char rx_ring_desc[32];

        ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
            "rx_ring%d_pkt", r);
        SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            rx_ring_desc, CTLFLAG_RW,
            &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

        ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
            "rx_ring%d_emp", r);
        SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            rx_ring_desc, CTLFLAG_RW,
            &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
            "# of times RX ring was empty");
    }
#endif

#ifdef IFPOLL_ENABLE
    SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
        jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
    SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
        "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
        jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif
}
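/*
 * Allocate all DMA resources: the descriptor bookkeeping arrays, the
 * parent DMA tags, the TX/RX descriptor rings, the shadow status block
 * and the TX/RX buffer tags and maps.  Called once from jme_attach().
 */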
static int
jme_dma_alloc(struct jme_softc *sc)
{
    struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
    struct jme_txdesc *txd;
    bus_dmamem_t dmem;
    int error, i, asize;

    asize = __VM_CACHELINE_ALIGN(
        tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
    tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF,
        M_WAITOK | M_ZERO);

    for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

        asize = __VM_CACHELINE_ALIGN(
            rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
        rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF,
            M_WAITOK | M_ZERO);
    }

    /* Create parent ring tag. */
    error = bus_dma_tag_create(NULL,    /* parent */
        1, JME_RING_BOUNDARY,       /* algnmnt, boundary */
        sc->jme_lowaddr,            /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
        0,                          /* nsegments */
        BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
        0,                          /* flags */
        &sc->jme_cdata.jme_ring_tag);
    if (error) {
        device_printf(sc->jme_dev,
            "could not create parent ring DMA tag.\n");
        return error;
    }

    /*
     * Create DMA stuffs for TX ring
     */
    asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
    error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
        JME_TX_RING_ALIGN, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
        asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
    if (error) {
        device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
        return error;
    }
    tdata->jme_tx_ring_tag = dmem.dmem_tag;
    tdata->jme_tx_ring_map = dmem.dmem_map;
    tdata->jme_tx_ring = dmem.dmem_addr;
    tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;

    /*
     * Create DMA stuffs for RX rings
     */
    for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
        error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
        if (error)
            return error;
    }
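    /*
     * Unlike the ring tag above, the parent buffer tag imposes no
     * boundary restriction; TX mbufs and the shadow status block do
     * not share the rings' JME_RING_BOUNDARY constraint.
     */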
    /* Create parent buffer tag. */
    error = bus_dma_tag_create(NULL,    /* parent */
        1, 0,                       /* algnmnt, boundary */
        sc->jme_lowaddr,            /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
        0,                          /* nsegments */
        BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
        0,                          /* flags */
        &sc->jme_cdata.jme_buffer_tag);
    if (error) {
        device_printf(sc->jme_dev,
            "could not create parent buffer DMA tag.\n");
        return error;
    }

    /*
     * Create DMA stuffs for shadow status block
     */
    asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
    error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
        JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
        asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
    if (error) {
        device_printf(sc->jme_dev,
            "could not create shadow status block.\n");
        return error;
    }
    sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
    sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
    sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
    sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

    /*
     * Create DMA stuffs for TX buffers
     */

    /* Create tag for Tx buffers. */
    error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
        1, 0,                       /* algnmnt, boundary */
        BUS_SPACE_MAXADDR,          /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        JME_TSO_MAXSIZE,            /* maxsize */
        JME_MAXTXSEGS,              /* nsegments */
        JME_MAXSEGSIZE,             /* maxsegsize */
        BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
        &tdata->jme_tx_tag);
    if (error != 0) {
        device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
        return error;
    }

    /* Create DMA maps for Tx buffers. */
    for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
        txd = &tdata->jme_txdesc[i];
        error = bus_dmamap_create(tdata->jme_tx_tag,
            BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
            &txd->tx_dmamap);
        if (error) {
            int j;

            device_printf(sc->jme_dev,
                "could not create %dth Tx dmamap.\n", i);

            for (j = 0; j < i; ++j) {
                txd = &tdata->jme_txdesc[j];
                bus_dmamap_destroy(tdata->jme_tx_tag,
                    txd->tx_dmamap);
            }
            bus_dma_tag_destroy(tdata->jme_tx_tag);
            tdata->jme_tx_tag = NULL;
            return error;
        }
    }

    /*
     * Create DMA stuffs for RX buffers
     */
    for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
        error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
        if (error)
            return error;
    }
    return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
    struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
    struct jme_txdesc *txd;
    struct jme_rxdesc *rxd;
    struct jme_rxdata *rdata;
    int i, r;

    /* Tx ring */
    if (tdata->jme_tx_ring_tag != NULL) {
        bus_dmamap_unload(tdata->jme_tx_ring_tag,
            tdata->jme_tx_ring_map);
        bus_dmamem_free(tdata->jme_tx_ring_tag,
            tdata->jme_tx_ring, tdata->jme_tx_ring_map);
        bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
        tdata->jme_tx_ring_tag = NULL;
    }

    /* Rx ring */
    for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
        rdata = &sc->jme_cdata.jme_rx_data[r];
        if (rdata->jme_rx_ring_tag != NULL) {
            bus_dmamap_unload(rdata->jme_rx_ring_tag,
                rdata->jme_rx_ring_map);
            bus_dmamem_free(rdata->jme_rx_ring_tag,
                rdata->jme_rx_ring,
                rdata->jme_rx_ring_map);
            bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
            rdata->jme_rx_ring_tag = NULL;
        }
    }

    /* Tx buffers */
    if (tdata->jme_tx_tag != NULL) {
        for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
            txd = &tdata->jme_txdesc[i];
            bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
        }
        bus_dma_tag_destroy(tdata->jme_tx_tag);
        tdata->jme_tx_tag = NULL;
    }

    /* Rx buffers */
    for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
        rdata = &sc->jme_cdata.jme_rx_data[r];
        if (rdata->jme_rx_tag != NULL) {
            for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
                rxd = &rdata->jme_rxdesc[i];
                bus_dmamap_destroy(rdata->jme_rx_tag,
                    rxd->rx_dmamap);
            }
            bus_dmamap_destroy(rdata->jme_rx_tag,
                rdata->jme_rx_sparemap);
            bus_dma_tag_destroy(rdata->jme_rx_tag);
            rdata->jme_rx_tag = NULL;
        }
    }

    /* Shadow status block. */
    if (sc->jme_cdata.jme_ssb_tag != NULL) {
        bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
            sc->jme_cdata.jme_ssb_map);
        bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
            sc->jme_cdata.jme_ssb_block,
            sc->jme_cdata.jme_ssb_map);
        bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
        sc->jme_cdata.jme_ssb_tag = NULL;
    }

    if (sc->jme_cdata.jme_buffer_tag != NULL) {
        bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
        sc->jme_cdata.jme_buffer_tag = NULL;
    }
    if (sc->jme_cdata.jme_ring_tag != NULL) {
        bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
        sc->jme_cdata.jme_ring_tag = NULL;
    }

    if (tdata->jme_txdesc != NULL) {
        kfree(tdata->jme_txdesc, M_DEVBUF);
        tdata->jme_txdesc = NULL;
    }
    for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
        rdata = &sc->jme_cdata.jme_rx_data[r];
        if (rdata->jme_rxdesc != NULL) {
            kfree(rdata->jme_rxdesc, M_DEVBUF);
            rdata->jme_rxdesc = NULL;
        }
    }
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
    return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed, as we have
 * no control after powering off.  If the renegotiation fails, WOL may
 * not work.  Running at 1Gbps draws more power than the 375mA at 3.3V
 * specified by the PCI specification, and that would result in power
 * to the ethernet controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in a suspend method in the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
    struct mii_data *mii;
    int aneg, i;

    JME_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->jme_miibus);
    mii_pollstat(mii);
    aneg = 0;
    if ((mii->mii_media_status & IFM_AVALID) != 0) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            return;
        case IFM_1000_T:
            aneg++;
        default:
            break;
        }
    }
    jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
    jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
        ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
    jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
        BMCR_AUTOEN | BMCR_STARTNEG);
    DELAY(1000);
    if (aneg != 0) {
        /* Poll link state until jme(4) gets a 10/100 link. */
        for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
            mii_pollstat(mii);
            if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                    jme_mac_config(sc);
                    return;
                default:
                    break;
                }
            }
            JME_UNLOCK(sc);
            pause("jmelnk", hz);
            JME_LOCK(sc);
        }
        if (i == MII_ANEGTICKS_GIGE)
            device_printf(sc->jme_dev, "establishing link failed, "
                "WOL may not work!");
    }
    /*
     * No link, force MAC to have 100Mbps, full-duplex link.
     * This is the last resort and may/may not work.
     */
    mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
    mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
    jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    uint32_t gpr, pmcs;
    uint16_t pmstat;
    int pmc;

    if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
        /* No PME capability, PHY power down. */
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
            MII_BMCR, BMCR_PDOWN);
        return;
    }

    gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
    pmcs = CSR_READ_4(sc, JME_PMCS);
    pmcs &= ~PMCS_WOL_ENB_MASK;
    if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
        pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
        /* Enable PME message. */
        gpr |= GPREG0_PME_ENB;
        /* For gigabit controllers, reset link speed to 10/100. */
        if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
            jme_setlinkspeed(sc);
    }

    CSR_WRITE_4(sc, JME_PMCS, pmcs);
    CSR_WRITE_4(sc, JME_GPREG0, gpr);

    /* Request PME. */
    pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
    pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
    if ((ifp->if_capenable & IFCAP_WOL) != 0)
        pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
    pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
    if ((ifp->if_capenable & IFCAP_WOL) == 0) {
        /* No WOL, PHY power down. */
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
            MII_BMCR, BMCR_PDOWN);
    }
}
#endif

static int
jme_suspend(device_t dev)
{
    struct jme_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;

    ifnet_serialize_all(ifp);
    jme_stop(sc);
#ifdef notyet
    jme_setwol(sc);
#endif
    ifnet_deserialize_all(ifp);

    return (0);
}

static int
jme_resume(device_t dev)
{
    struct jme_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
    int pmc;
#endif

    ifnet_serialize_all(ifp);

#ifdef notyet
    if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
        uint16_t pmstat;

        pmstat = pci_read_config(sc->jme_dev,
            pmc + PCIR_POWER_STATUS, 2);
        /* Disable PME, clear PME status. */
        pmstat &= ~PCIM_PSTAT_PMEENABLE;
        pci_write_config(sc->jme_dev,
            pmc + PCIR_POWER_STATUS, pmstat, 2);
    }
#endif

    if (ifp->if_flags & IFF_UP)
        jme_init(sc);

    ifnet_deserialize_all(ifp);

    return (0);
}

static __inline int
jme_tso_pullup(struct mbuf **mp)
{
    int hoff, iphlen, thoff;
    struct mbuf *m;

    m = *mp;
    KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

    iphlen = m->m_pkthdr.csum_iphlen;
    thoff = m->m_pkthdr.csum_thlen;
    hoff = m->m_pkthdr.csum_lhlen;

    KASSERT(iphlen > 0, ("invalid ip hlen"));
    KASSERT(thoff > 0, ("invalid tcp hlen"));
    KASSERT(hoff > 0, ("invalid ether hlen"));

    if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
        m = m_pullup(m, hoff + iphlen + thoff);
        if (m == NULL) {
            *mp = NULL;
            return ENOBUFS;
        }
        *mp = m;
    }
    return 0;
}

static int
jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
{
    struct jme_txdesc *txd;
    struct jme_desc *desc;
    struct mbuf *m;
    bus_dma_segment_t txsegs[JME_MAXTXSEGS];
    int maxsegs, nsegs;
    int error, i, prod, symbol_desc;
    uint32_t cflags, flag64, mss;

    M_ASSERTPKTHDR((*m_head));

    if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
        /* XXX Is this necessary? */
        error = jme_tso_pullup(m_head);
        if (error)
            return error;
    }

    prod = tdata->jme_tx_prod;
    txd = &tdata->jme_txdesc[prod];

    if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
        symbol_desc = 1;
    else
        symbol_desc = 0;

    maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
        (JME_TXD_RSVD + symbol_desc);
    if (maxsegs > JME_MAXTXSEGS)
        maxsegs = JME_MAXTXSEGS;
    KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
        ("not enough segments %d", maxsegs));

    error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
        txd->tx_dmamap, m_head,
        txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
    if (error)
        goto fail;
    *segs_used += nsegs;

    bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
        BUS_DMASYNC_PREWRITE);

    m = *m_head;
    cflags = 0;
    mss = 0;

    /* Configure checksum offload. */
    if (m->m_pkthdr.csum_flags & CSUM_TSO) {
        mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
        cflags |= JME_TD_TSO;
    } else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
        if (m->m_pkthdr.csum_flags & CSUM_IP)
            cflags |= JME_TD_IPCSUM;
        if (m->m_pkthdr.csum_flags & CSUM_TCP)
            cflags |= JME_TD_TCPCSUM;
        if (m->m_pkthdr.csum_flags & CSUM_UDP)
            cflags |= JME_TD_UDPCSUM;
    }

    /* Configure VLAN. */
    if (m->m_flags & M_VLANTAG) {
        cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
        cflags |= JME_TD_VLAN_TAG;
    }

    desc = &tdata->jme_tx_ring[prod];
    desc->flags = htole32(cflags);
    desc->addr_hi = htole32(m->m_pkthdr.len);
    if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
        /*
         * Use 64bits TX desc chain format.
         *
         * The first TX desc of the chain, which is setup here,
         * is just a symbol TX desc carrying no payload.
         */
        flag64 = JME_TD_64BIT;
        desc->buflen = htole32(mss);
        desc->addr_lo = 0;

        *segs_used += 1;

        /* No effective TX desc is consumed */
        i = 0;
    } else {
        /*
         * Use 32bits TX desc chain format.
         *
         * The first TX desc of the chain, which is setup here,
         * is an effective TX desc carrying the first segment of
         * the mbuf chain.
         */
        flag64 = 0;
        desc->buflen = htole32(mss | txsegs[0].ds_len);
        desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

        /* One effective TX desc is consumed */
        i = 1;
    }
    tdata->jme_tx_cnt++;
    KKASSERT(tdata->jme_tx_cnt - i <
        tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
    JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);

    txd->tx_ndesc = 1 - i;
    for (; i < nsegs; i++) {
        desc = &tdata->jme_tx_ring[prod];
        desc->buflen = htole32(txsegs[i].ds_len);
        desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
        desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
        desc->flags = htole32(JME_TD_OWN | flag64);

        tdata->jme_tx_cnt++;
        KKASSERT(tdata->jme_tx_cnt <=
            tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
        JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
    }

    /* Update producer index. */
    tdata->jme_tx_prod = prod;
    /*
     * Finally request interrupt and give the first descriptor
     * ownership to the hardware.
     */
    desc = txd->tx_desc;
    desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

    txd->tx_m = m;
    txd->tx_ndesc += nsegs;

    return 0;
fail:
    m_freem(*m_head);
    *m_head = NULL;
    return error;
}
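/*
 * ifnet if_start handler: drain the interface send queue into the TX
 * ring, kicking the TX MAC once every jme_tx_wreg queued segments and
 * once more for any remainder.
 */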
static void
jme_start(struct ifnet *ifp)
{
    struct jme_softc *sc = ifp->if_softc;
    struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
    struct mbuf *m_head;
    int enq = 0;

    ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

    if (!sc->jme_has_link) {
        ifq_purge(&ifp->if_snd);
        return;
    }

    if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
        return;

    if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
        jme_txeof(tdata);

    while (!ifq_is_empty(&ifp->if_snd)) {
        /*
         * Check number of available TX descs, always
         * leave JME_TXD_RSVD free TX descs.
         */
        if (tdata->jme_tx_cnt + JME_TXD_SPARE >
            tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
            ifq_set_oactive(&ifp->if_snd);
            break;
        }

        m_head = ifq_dequeue(&ifp->if_snd, NULL);
        if (m_head == NULL)
            break;

        /*
         * Pack the data into the transmit ring. If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (jme_encap(tdata, &m_head, &enq)) {
            KKASSERT(m_head == NULL);
            ifp->if_oerrors++;
            ifq_set_oactive(&ifp->if_snd);
            break;
        }

        if (enq >= tdata->jme_tx_wreg) {
            CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
                TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
            enq = 0;
        }

        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to him.
         */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set a timeout in case the chip goes out to lunch. */
        ifp->if_timer = JME_TX_TIMEOUT;
    }

    if (enq > 0) {
        /*
         * Reading TXCSR takes a very long time under heavy load,
         * so cache the TXCSR value and write the ORed value with
         * the kick command to the TXCSR.  This saves one register
         * access cycle.
         */
        CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
            TXCSR_TXQ_N_START(TXCSR_TXQ0));
    }
}
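/*
 * ifnet watchdog: if TX completion interrupts were merely missed,
 * reclaim descriptors and restart transmission; otherwise (lost link
 * or a genuine TX hang) reinitialize the chip.
 */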
static void
jme_watchdog(struct ifnet *ifp)
{
    struct jme_softc *sc = ifp->if_softc;
    struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    if (!sc->jme_has_link) {
        if_printf(ifp, "watchdog timeout (missed link)\n");
        ifp->if_oerrors++;
        jme_init(sc);
        return;
    }

    jme_txeof(tdata);
    if (tdata->jme_tx_cnt == 0) {
        if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
            "-- recovering\n");
        if (!ifq_is_empty(&ifp->if_snd))
            if_devstart(ifp);
        return;
    }

    if_printf(ifp, "watchdog timeout\n");
    ifp->if_oerrors++;
    jme_init(sc);
    if (!ifq_is_empty(&ifp->if_snd))
        if_devstart(ifp);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
    struct jme_softc *sc = ifp->if_softc;
    struct mii_data *mii = device_get_softc(sc->jme_miibus);
    struct ifreq *ifr = (struct ifreq *)data;
    int error = 0, mask;

    ASSERT_IFNET_SERIALIZED_ALL(ifp);

    switch (cmd) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
            (!(sc->jme_caps & JME_CAP_JUMBO) &&
             ifr->ifr_mtu > JME_MAX_MTU)) {
            error = EINVAL;
            break;
        }

        if (ifp->if_mtu != ifr->ifr_mtu) {
            /*
             * No special configuration is required when interface
             * MTU is changed, but availability of Tx checksum
             * offload should be checked against the new MTU size,
             * as the FIFO size is just 2K.
             */
            if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
                ifp->if_capenable &=
                    ~(IFCAP_TXCSUM | IFCAP_TSO);
                ifp->if_hwassist &=
                    ~(JME_CSUM_FEATURES | CSUM_TSO);
            }
            ifp->if_mtu = ifr->ifr_mtu;
            if (ifp->if_flags & IFF_RUNNING)
                jme_init(sc);
        }
        break;

    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_flags & IFF_RUNNING) {
                if ((ifp->if_flags ^ sc->jme_if_flags) &
                    (IFF_PROMISC | IFF_ALLMULTI))
                    jme_set_filter(sc);
            } else {
                jme_init(sc);
            }
        } else {
            if (ifp->if_flags & IFF_RUNNING)
                jme_stop(sc);
        }
        sc->jme_if_flags = ifp->if_flags;
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (ifp->if_flags & IFF_RUNNING)
            jme_set_filter(sc);
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;

    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;

        if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= JME_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~JME_CSUM_FEATURES;
        }
        if (mask & IFCAP_RXCSUM) {
            uint32_t reg;

            ifp->if_capenable ^= IFCAP_RXCSUM;
            reg = CSR_READ_4(sc, JME_RXMAC);
            reg &= ~RXMAC_CSUM_ENB;
            if (ifp->if_capenable & IFCAP_RXCSUM)
                reg |= RXMAC_CSUM_ENB;
            CSR_WRITE_4(sc, JME_RXMAC, reg);
        }

        if (mask & IFCAP_VLAN_HWTAGGING) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            jme_set_vlan(sc);
        }

        if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
            ifp->if_capenable ^= IFCAP_TSO;
            if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
            else
                ifp->if_hwassist &= ~CSUM_TSO;
        }

        if (mask & IFCAP_RSS)
            ifp->if_capenable ^= IFCAP_RSS;
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }
    return (error);
}
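/*
 * Program the MACs (GHC, RXMAC, TXMAC, TXPFC) to match the resolved
 * speed/duplex/flow-control, and apply the chip workarounds for
 * extended FIFO depth and half-duplex operation where needed.
 */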
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B.
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts.
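	 * They are re-enabled at the "back" label below, once the
	 * acknowledged events have been processed.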
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
			    jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware of the availability of new Rx
			 * buffers. Reading RXCSR takes a very long time
			 * under heavy load, so cache the RXCSR value and
			 * write the ORed value with the kick command to
			 * the RXCSR. This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&tdata->jme_tx_serialize);
			jme_txeof(tdata);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&tdata->jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_txeof(struct jme_txdata *tdata)
{
	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
	int cons;

	cons = tdata->jme_tx_cons;
	if (cons == tdata->jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != tdata->jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &tdata->jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * The chip always updates the TX descriptor's buflen
		 * field, and this update happens only after it has
		 * cleared the OWN bit. So even when the OWN bit reads
		 * as cleared, we still can't be sure whether the chip
		 * has finished updating the buflen field. To avoid this
		 * race, we wait for the next TX descriptor's OWN bit to
		 * be cleared by the chip before reusing this TX
		 * descriptor.
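		 * For example, if the frame at cons used two descriptors,
		 * it is reclaimed only once the first descriptor of the
		 * frame at cons + 2 also has its OWN bit cleared; by then
		 * the buflen write-back for cons must have completed.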
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
		next_txd = &tdata->jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame. In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			tdata->jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		tdata->jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(tdata->jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	tdata->jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);
}

static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}
}

static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;
	return pi;
}

/* Receive a frame. */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];

	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	if (nsegs > 1) {
		/* Skip the first descriptor. */
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);

		/*
		 * Clear the OWN bit of the following RX descriptors;
		 * hardware will not clear the OWN bit except on the
		 * first RX descriptor.
		 *
		 * Since the first RX descriptor is set up, i.e.
		 * its OWN bit is on, before its following RX
		 * descriptors, leaving the OWN bit on the following RX
		 * descriptors would trick the hardware into thinking
		 * that they are ready to be used too.
		 */
		for (count = 1; count < nsegs; count++,
		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
			rdata->jme_rx_ring[cons].flags = 0;

		cons = rdata->jme_rx_cons;
	}

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
	    "hash 0x%08x, hash info 0x%08x\n",
	    rdata->jme_rx_idx, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding that
			 * are used to align the IP header on a 32-bit
			 * boundary. Also note the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets.
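			 * The hardware delivers the 802.1Q tag in the
			 * low bits of the descriptor flags word
			 * (JME_RD_VLAN_MASK).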
			 */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) ==
			    JME_RD_HASH_FN_TOEPLITZ) {
				m->m_flags |= (M_HASH | M_CKHASH);
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ether_input_pkt(ifp, m, pi);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}

static void
jme_rxeof(struct jme_rxdata *rdata, int count)
{
	struct jme_desc *desc;
	int nsegs, pktlen;

	for (;;) {
#ifdef IFPOLL_ENABLE
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received
		 * byte count. A non-matching value would indicate that
		 * the hardware is still trying to update the Rx
		 * descriptors. I'm not sure whether this check is
		 * needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "RX fragment count(%d) and "
			    "packet size(%d) mismatch\n", nsegs, pktlen);
			break;
		}

		/*
		 * NOTE:
		 * The RSS hash and hash information may _not_ be set by
		 * the hardware even if the OWN bit is cleared and the
		 * VALID bit is set.
		 *
		 * If the RSS information has not been delivered by the
		 * hardware yet, we MUST NOT accept this packet, let
		 * alone reuse its RX descriptor. If this packet were
		 * accepted and its RX descriptor reused before the
		 * hardware delivered the RSS information, the RX
		 * buffer's address would be trashed by the RSS
		 * information belatedly written back by the hardware.
		 */
		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
			struct jme_rxdesc *rxd;
			uint32_t hashinfo;

			hashinfo = le32toh(desc->addr_lo);
			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];

			/*
			 * This test should be enough to detect the
			 * pending RSS information delivery, given:
			 * - If the RSS hash is not calculated, the
			 *   hashinfo will be 0. However, the lower
			 *   32 bits of an RX buffer's physical address
			 *   will never be 0. (see jme_rxbuf_dma_filter)
			 * - If the RSS hash is calculated, the lowest 4
			 *   bits of hashinfo will be set, while the RX
			 *   buffers are at least 2K aligned.
			 */
			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
#ifdef JME_SHOW_RSSWB
				if_printf(&rdata->jme_sc->arpcom.ac_if,
				    "RSS is not written back yet\n");
#endif
				break;
			}
		}

		/* Received a frame.
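		 * jme_rxpkt() consumes every descriptor of the frame
		 * and advances jme_rx_cons past them.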
		 */
		jme_rxpkt(rdata);
	}
}

static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(&sc->jme_serialize);

	KKASSERT(mycpuid == JME_TICK_CPUID);

	sc->jme_in_tick = TRUE;
	mii_tick(mii);
	sc->jme_in_tick = FALSE;

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(&sc->jme_serialize);
}

static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold the reset bit before stopping the reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear the reset bit after stopping the reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}

static void
jme_init(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
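	 * jme_reset() stops the MACs, pulses the reset bit, and steps
	 * the chip through the clock source disable/enable sequence
	 * above.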
	 */
	jme_reset(sc);

	/*
	 * Set up the MSI/MSI-X vector to interrupt mapping.
	 */
	jme_set_msinum(sc);

	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	/* Init RX descriptors */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
		if (error) {
			if_printf(ifp, "initialization failed: "
			    "no memory for %dth RX ring.\n", r);
			jme_stop(sc);
			return;
		}
	}

	/* Init TX descriptors */
	jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);

	/* Set Tx ring address to the hardware. */
	paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
#if 0
	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns. So decrease
	 * the FIFO threshold to reduce the FIFO overruns for frames
	 * larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use the
	 * maximum allowable FIFO threshold, 128QW.
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
	    JME_RX_FIFO_SIZE)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
#else
	/* Improve PCI Express compatibility */
	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
#endif
	sc->jme_rxcsr |= sc->jme_rx_dma_size;
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

		/* Set Rx descriptor counter.
		 */
		CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);

		/* Set Rx ring address to the hardware. */
		paddr = rdata->jme_rx_ring_paddr;
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
	}

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation. Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame. This greatly
	 * helps Rx performance on strict-alignment architectures, as
	 * the frame does not need to be copied to align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to microseconds. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read the
	 * JME_INTR_STATUS register in jme_intr. Also it seems
	 * that it's hard to synchronize interrupt status between
	 * hardware and software with shadow posting due to
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	jme_set_tx_coal(sc);

	/* Configure Rx queues packet completion coalescing. */
	jme_set_rx_coal(sc);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_cdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	     TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	     TXTRHD_RT_LIMIT_MASK));

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	/* Initialize the interrupt mask. */
	jme_enable_intr(sc);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of valid link in jme_miibus_statchg.
	 */
	sc->jme_has_link = FALSE;

	/* Set the current media.
	 */
	mii = device_get_softc(sc->jme_miibus);
	mii_mediachg(mii);

	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_has_link = FALSE;

	/*
	 * Disable interrupts.
	 */
	jme_disable_intr(sc);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partially finished RX segments
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}

static void
jme_init_tx_ring(struct jme_txdata *tdata)
{
	struct jme_txdesc *txd;
	int i;

	tdata->jme_tx_prod = 0;
	tdata->jme_tx_cons = 0;
	tdata->jme_tx_cnt = 0;

	bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &tdata->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}
}

static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_chain_data *cd;

	cd = &sc->jme_cdata;
	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
}

static int
jme_init_rx_ring(struct jme_rxdata *rdata)
{
	struct jme_rxdesc *rxd;
	int i;

	KKASSERT(rdata->jme_rxhead == NULL &&
	    rdata->jme_rxtail == NULL &&
	    rdata->jme_rxlen == 0);
	rdata->jme_rx_cons = 0;

	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		int error;

		rxd = &rdata->jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rdata->jme_rx_ring[i];
		error = jme_newbuf(rdata, rxd, 1);
		if (error)
			return error;
	}
	return 0;
}

static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature in order not to copy the entire frame just to align
	 * the IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
	    rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;
	rxd->rx_paddr = segs.ds_addr;

	jme_setup_rxdesc(rxd);
	return 0;
}

static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
3130 */ 3131 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 3132 3133 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 3134 if (ifp->if_flags & IFF_PROMISC) 3135 rxcfg |= RXMAC_PROMISC; 3136 if (ifp->if_flags & IFF_ALLMULTI) 3137 rxcfg |= RXMAC_ALLMULTI; 3138 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3139 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3140 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3141 return; 3142 } 3143 3144 /* 3145 * Set up the multicast address filter by passing all multicast 3146 * addresses through a CRC generator, and then using the low-order 3147 * 6 bits as an index into the 64 bit multicast hash table. The 3148 * high order bits select the register, while the rest of the bits 3149 * select the bit within the register. 3150 */ 3151 rxcfg |= RXMAC_MULTICAST; 3152 bzero(mchash, sizeof(mchash)); 3153 3154 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3155 if (ifma->ifma_addr->sa_family != AF_LINK) 3156 continue; 3157 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3158 ifma->ifma_addr), ETHER_ADDR_LEN); 3159 3160 /* Just want the 6 least significant bits. */ 3161 crc &= 0x3f; 3162 3163 /* Set the corresponding bit in the hash table. */ 3164 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3165 } 3166 3167 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3168 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3169 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3170 } 3171 3172 static int 3173 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 3174 { 3175 struct jme_softc *sc = arg1; 3176 struct ifnet *ifp = &sc->arpcom.ac_if; 3177 int error, v; 3178 3179 ifnet_serialize_all(ifp); 3180 3181 v = sc->jme_tx_coal_to; 3182 error = sysctl_handle_int(oidp, &v, 0, req); 3183 if (error || req->newptr == NULL) 3184 goto back; 3185 3186 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 3187 error = EINVAL; 3188 goto back; 3189 } 3190 3191 if (v != sc->jme_tx_coal_to) { 3192 sc->jme_tx_coal_to = v; 3193 if (ifp->if_flags & IFF_RUNNING) 3194 jme_set_tx_coal(sc); 3195 } 3196 back: 3197 ifnet_deserialize_all(ifp); 3198 return error; 3199 } 3200 3201 static int 3202 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 3203 { 3204 struct jme_softc *sc = arg1; 3205 struct ifnet *ifp = &sc->arpcom.ac_if; 3206 int error, v; 3207 3208 ifnet_serialize_all(ifp); 3209 3210 v = sc->jme_tx_coal_pkt; 3211 error = sysctl_handle_int(oidp, &v, 0, req); 3212 if (error || req->newptr == NULL) 3213 goto back; 3214 3215 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 3216 error = EINVAL; 3217 goto back; 3218 } 3219 3220 if (v != sc->jme_tx_coal_pkt) { 3221 sc->jme_tx_coal_pkt = v; 3222 if (ifp->if_flags & IFF_RUNNING) 3223 jme_set_tx_coal(sc); 3224 } 3225 back: 3226 ifnet_deserialize_all(ifp); 3227 return error; 3228 } 3229 3230 static int 3231 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 3232 { 3233 struct jme_softc *sc = arg1; 3234 struct ifnet *ifp = &sc->arpcom.ac_if; 3235 int error, v; 3236 3237 ifnet_serialize_all(ifp); 3238 3239 v = sc->jme_rx_coal_to; 3240 error = sysctl_handle_int(oidp, &v, 0, req); 3241 if (error || req->newptr == NULL) 3242 goto back; 3243 3244 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 3245 error = EINVAL; 3246 goto back; 3247 } 3248 3249 if (v != sc->jme_rx_coal_to) { 3250 sc->jme_rx_coal_to = v; 3251 if (ifp->if_flags & IFF_RUNNING) 3252 jme_set_rx_coal(sc); 3253 } 3254 back: 3255 ifnet_deserialize_all(ifp); 3256 return error; 3257 } 3258 3259 static int 3260 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 3261 { 3262 struct jme_softc *sc = arg1; 3263 struct ifnet *ifp = &sc->arpcom.ac_if; 3264 int error, v; 3265 3266 
	ifnet_serialize_all(ifp);

	v = sc->jme_rx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_pkt) {
		sc->jme_rx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	ifnet_deserialize_all(ifp);
	return error;
}

static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}

static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;
	int r;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
}

#ifdef IFPOLL_ENABLE

static void
jme_npoll_status(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (status & INTR_RXQ_DESC_EMPTY) {
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		jme_rx_restart(sc, status);
	}
}

static void
jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct jme_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	jme_rxeof(rdata, cycle);
}

static void
jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct jme_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	jme_txeof(tdata);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int i, off;

		info->ifpi_status.status_func = jme_npoll_status;
		info->ifpi_status.serializer = &sc->jme_serialize;

		off = sc->jme_npoll_txoff;
		KKASSERT(off <= ncpus2);
		info->ifpi_tx[off].poll_func = jme_npoll_tx;
		info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data;
		info->ifpi_tx[off].serializer =
		    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

		off = sc->jme_npoll_rxoff;
		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];
			int idx = i + off;

			info->ifpi_rx[idx].poll_func = jme_npoll_rx;
			info->ifpi_rx[idx].arg = rdata;
			info->ifpi_rx[idx].serializer =
			    &rdata->jme_rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING)
			jme_disable_intr(sc);
		ifp->if_npoll_cpuid = sc->jme_npoll_txoff;
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			jme_enable_intr(sc);
		ifp->if_npoll_cpuid = -1;
	}
}

static int
jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->jme_npoll_rxoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->jme_npoll_rxoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

static int
jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->jme_npoll_txoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2) {
		error = EINVAL;
	} else {
		error = 0;
		sc->jme_npoll_txoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */

static int
jme_rxring_dma_alloc(struct jme_rxdata *rdata)
{
	bus_dmamem_t dmem;
	int error, asize;

	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
	    JME_RX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
		return error;
	}
	rdata->jme_rx_ring_tag = dmem.dmem_tag;
	rdata->jme_rx_ring_map = dmem.dmem_map;
	rdata->jme_rx_ring = dmem.dmem_addr;
	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;

	return 0;
}

static int
jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
{
	if ((paddr & 0xffffffff) == 0) {
		/*
		 * Don't allow the lower 32 bits of an RX buffer's
		 * physical address to be 0; otherwise it would break
		 * the detection of pending RSS information delivery
		 * on the RX path.
		 */
		return 1;
	}
	return 0;
}

static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	bus_addr_t lowaddr;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
		/* jme_rxbuf_dma_filter will be called */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    jme_rxbuf_dma_filter, NULL,	/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers.
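	 * One map is created per RX descriptor, plus a spare map;
	 * jme_newbuf() loads a fresh mbuf through the spare map and
	 * then swaps it with the descriptor's map on each refill.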
	 */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
	    &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
		    &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
jme_rx_intr(struct jme_softc *sc, uint32_t status)
{
	int r;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		if (status & rdata->jme_rx_coal) {
			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, -1);
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}
	}
}

static void
jme_enable_rss(struct jme_softc *sc)
{
	uint32_t rssc, ind;
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	int i;

	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
	    sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
	    ("%s: invalid # of RX rings (%d)",
	     sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));

	rssc = RSSC_HASH_64_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);

	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);

		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
	}

	/*
	 * Create the redirect table in the following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	ind = 0;
	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
		int q;

		q = i % sc->jme_cdata.jme_rx_ring_cnt;
		ind |= q << (i * 8);
	}
	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);

	for (i = 0; i < RSSTBL_NREGS; ++i)
		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
}

static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}

static void
jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}

static void
jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}

static int
jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}

#ifdef INVARIANTS

static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;
	int offset, offset_def;

	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
	KKASSERT(msix_count <= JME_NMSIX);

	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/*
	 * Setup status MSI-X
	 */

	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;
	msix->jme_msix_arg = sc;
	msix->jme_msix_func = jme_msix_status;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		msix->jme_msix_intrs |=
		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
	}
	msix->jme_msix_serialize = &sc->jme_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
	    device_get_nameunit(dev));

	/*
	 * Setup TX MSI-X
	 */

	offset_def = device_get_unit(dev) % ncpus2;
	offset = device_getenv_int(dev, "msix.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid msix.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}

	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = offset;
	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
	msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/*
	 * Setup RX MSI-X
	 */

	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;

		offset = device_getenv_int(dev, "msix.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid msix.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r + offset;
		KKASSERT(msix->jme_msix_cpuid < ncpus2);
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Set up jme_msix_cnt early, so we can clean up on error */
	sc->jme_msix_cnt = msix_count;

	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	if (error)
		jme_msix_free(dev);
}

static int
jme_intr_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	u_int irq_flags;

	jme_msix_try_alloc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
		    &sc->jme_irq_rid, &irq_flags);

		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->jme_irq_rid, irq_flags);
		if (sc->jme_irq_res == NULL) {
			device_printf(dev, "can't allocate irq\n");
			return ENXIO;
		}
	}
	return 0;
}

static void
jme_msix_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	KKASSERT(sc->jme_msix_cnt > 1);

	for (i = 0; i < sc->jme_msix_cnt; ++i) {
		struct jme_msix_data *msix = &sc->jme_msix[i];

		if (msix->jme_msix_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    msix->jme_msix_rid, msix->jme_msix_res);
			msix->jme_msix_res = NULL;
		}
		if (msix->jme_msix_rid >= 0) {
			pci_release_msix_vector(dev, msix->jme_msix_rid);
			msix->jme_msix_rid = -1;
		}
	}
	pci_teardown_msix(dev);
}

static void
jme_intr_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		if (sc->jme_irq_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
			    sc->jme_irq_res);
		}
		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(dev);
	} else {
		jme_msix_free(dev);
	}
}

static void
jme_msix_tx(void *xtdata)
{
	struct jme_txdata *tdata = xtdata;
	struct jme_softc *sc = tdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
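	/*
	 * The TX coalescing interrupts were masked and acked above;
	 * they are unmasked again once jme_txeof() has run below.
	 */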

	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(tdata);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}

static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    rdata->jme_rx_coal | rdata->jme_rx_comp);

	if (ifp->if_flags & IFF_RUNNING)
		jme_rxeof(rdata, -1);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
}

static void
jme_msix_status(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);

	status = CSR_READ_4(sc, JME_INTR_STATUS);

	if (status & INTR_RXQ_DESC_EMPTY) {
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		if (ifp->if_flags & IFF_RUNNING)
			jme_rx_restart(sc, status);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
}

static void
jme_rx_restart(struct jme_softc *sc, uint32_t status)
{
	int i;

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		if (status & rdata->jme_rx_empty) {
			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, -1);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_emp++;
#endif
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}
	}
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
	    RXCSR_RXQ_START);
}

static void
jme_set_msinum(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < JME_MSINUM_CNT; ++i)
		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
}

static int
jme_intr_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error;

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		return jme_msix_setup(dev);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		return error;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
}

static void
jme_intr_teardown(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		jme_msix_teardown(dev, sc->jme_msix_cnt);
	else
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
}

static int
jme_msix_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int x;

	for (x = 0; x < sc->jme_msix_cnt; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];
		int error;

		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
		    &msix->jme_msix_handle, msix->jme_msix_serialize,
		    msix->jme_msix_desc);
		if (error) {
			device_printf(dev, "could not set up %s "
			    "interrupt handler.\n", msix->jme_msix_desc);
			jme_msix_teardown(dev, x);
			return error;
		}
	}
	ifp->if_cpuid = sc->jme_tx_cpuid;
	return 0;
}

static void
jme_msix_teardown(device_t dev, int msix_count)
{
	struct jme_softc *sc = device_get_softc(dev);
	int x;

	for (x = 0; x < msix_count; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];

		bus_teardown_intr(dev, msix->jme_msix_res,
		    msix->jme_msix_handle);
	}
}

static void
jme_serialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}

static void
jme_deserialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}

static void
jme_enable_intr(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_disable_intr(struct jme_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
}