/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */

#include "opt_polling.h"
#include "opt_jme.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

#define JME_TX_SERIALIZE	1
#define JME_RX_SERIALIZE	2

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
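/*
 * NOTE:
 * JME_TX_SERIALIZE and JME_RX_SERIALIZE above are fixed indices into
 * sc->jme_serialize_arr[]: slot 0 holds the main serializer, slot 1
 * the TX serializer and slots 2 and up the per-ring RX serializers.
 * This layout is asserted with KKASSERT() when the array is built in
 * jme_attach().
 */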
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_rxdata *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_rxdata *);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_filter(void *, bus_addr_t);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);
static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);

static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
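/*
 * The tunables above may be set from the loader before the driver
 * attaches, e.g. in /boot/loader.conf (the values below are purely
 * illustrative, not recommendations):
 *
 *   hw.jme.msi.enable=0
 *   hw.jme.rx_ring_count=4
 *   hw.jme.rx_desc_count=512
 */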
static __inline void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	if (sc->jme_in_tick)
		jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any allocated
	 * resources.  Note that just saving the values of the JME_TXNDA
	 * and JME_RXNDA registers before stopping the MAC and restoring
	 * them afterwards is not sufficient to guarantee a correct MAC
	 * state, because stopping MAC operation can take a while and
	 * the hardware might have updated the JME_TXNDA/JME_RXNDA
	 * registers during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
	if (sc->jme_in_tick)
		jme_deserialize_skipmain(sc);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt;
	uint8_t eaddr[ETHER_ADDR_LEN];

	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
	    jme_tx_desc_count);
	sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
	    JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
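	/*
	 * NOTE:
	 * Both descriptor counts are rounded up to JME_NDESC_ALIGN
	 * (16 currently), so e.g. an "rx_desc_count" of 100 is silently
	 * bumped to 112 before being clamped to JME_NDESC_MAX.
	 */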
	/*
	 * Calculate rx rings
	 */
	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i == JME_TX_SERIALIZE);
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;

	KKASSERT(i == JME_RX_SERIALIZE);
	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use a different
	 * BAR to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_TSO |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
"rx_ring%d_emp", r); 1087 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx, 1088 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 1089 rx_ring_desc, CTLFLAG_RW, 1090 &sc->jme_cdata.jme_rx_data[r].jme_rx_emp, 1091 "# of time RX ring empty"); 1092 } 1093 #endif 1094 1095 /* 1096 * Set default coalesce valves 1097 */ 1098 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 1099 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 1100 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 1101 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 1102 1103 /* 1104 * Adjust coalesce valves, in case that the number of TX/RX 1105 * descs are set to small values by users. 1106 * 1107 * NOTE: coal_max will not be zero, since number of descs 1108 * must aligned by JME_NDESC_ALIGN (16 currently) 1109 */ 1110 coal_max = sc->jme_cdata.jme_tx_desc_cnt / 2; 1111 if (coal_max < sc->jme_tx_coal_pkt) 1112 sc->jme_tx_coal_pkt = coal_max; 1113 1114 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2; 1115 if (coal_max < sc->jme_rx_coal_pkt) 1116 sc->jme_rx_coal_pkt = coal_max; 1117 } 1118 1119 static int 1120 jme_dma_alloc(struct jme_softc *sc) 1121 { 1122 struct jme_txdesc *txd; 1123 bus_dmamem_t dmem; 1124 int error, i, asize; 1125 1126 sc->jme_cdata.jme_txdesc = 1127 kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc), 1128 M_DEVBUF, M_WAITOK | M_ZERO); 1129 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1130 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i]; 1131 1132 rdata->jme_rxdesc = 1133 kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc), 1134 M_DEVBUF, M_WAITOK | M_ZERO); 1135 } 1136 1137 /* Create parent ring tag. */ 1138 error = bus_dma_tag_create(NULL,/* parent */ 1139 1, JME_RING_BOUNDARY, /* algnmnt, boundary */ 1140 sc->jme_lowaddr, /* lowaddr */ 1141 BUS_SPACE_MAXADDR, /* highaddr */ 1142 NULL, NULL, /* filter, filterarg */ 1143 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1144 0, /* nsegments */ 1145 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1146 0, /* flags */ 1147 &sc->jme_cdata.jme_ring_tag); 1148 if (error) { 1149 device_printf(sc->jme_dev, 1150 "could not create parent ring DMA tag.\n"); 1151 return error; 1152 } 1153 1154 /* 1155 * Create DMA stuffs for TX ring 1156 */ 1157 asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN); 1158 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag, 1159 JME_TX_RING_ALIGN, 0, 1160 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1161 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1162 if (error) { 1163 device_printf(sc->jme_dev, "could not allocate Tx ring.\n"); 1164 return error; 1165 } 1166 sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag; 1167 sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map; 1168 sc->jme_cdata.jme_tx_ring = dmem.dmem_addr; 1169 sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr; 1170 1171 /* 1172 * Create DMA stuffs for RX rings 1173 */ 1174 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) { 1175 error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]); 1176 if (error) 1177 return error; 1178 } 1179 1180 /* Create parent buffer tag. 
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		    kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			    M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as
 * we have no control after powering off.  If the renegotiation fails,
 * WOL may not work.  Running at 1Gbps draws more power than the 375mA
 * at 3.3V specified in the PCI specification, and that would result
 * in power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}

static __inline int
jme_tso_pullup(struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
		("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}

static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
		    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
2030 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); 2031 2032 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) 2033 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; 2034 2035 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2036 if (status & jme_rx_status[r].jme_coal) { 2037 status |= jme_rx_status[r].jme_coal | 2038 jme_rx_status[r].jme_comp; 2039 } 2040 } 2041 2042 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 2043 2044 if (ifp->if_flags & IFF_RUNNING) { 2045 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 2046 jme_rx_intr(sc, status); 2047 2048 if (status & INTR_RXQ_DESC_EMPTY) { 2049 /* 2050 * Notify the hardware of the availability of new 2051 * Rx buffers. Reading RXCSR takes a very long time 2052 * under heavy load, so cache the RXCSR value and 2053 * write the ORed value with the kick command to 2054 * RXCSR. This saves one register access cycle. 2055 */ 2056 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 2057 RXCSR_RX_ENB | RXCSR_RXQ_START); 2058 } 2059 2060 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) { 2061 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize); 2062 jme_txeof(sc); 2063 if (!ifq_is_empty(&ifp->if_snd)) 2064 if_devstart(ifp); 2065 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize); 2066 } 2067 } 2068 back: 2069 /* Reenable interrupts. */ 2070 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2071 } 2072 2073 static void 2074 jme_txeof(struct jme_softc *sc) 2075 { 2076 struct ifnet *ifp = &sc->arpcom.ac_if; 2077 int cons; 2078 2079 cons = sc->jme_cdata.jme_tx_cons; 2080 if (cons == sc->jme_cdata.jme_tx_prod) 2081 return; 2082 2083 /* 2084 * Go through our Tx list and free mbufs for those 2085 * frames which have been transmitted. 2086 */ 2087 while (cons != sc->jme_cdata.jme_tx_prod) { 2088 struct jme_txdesc *txd, *next_txd; 2089 uint32_t status, next_status; 2090 int next_cons, nsegs; 2091 2092 txd = &sc->jme_cdata.jme_txdesc[cons]; 2093 KASSERT(txd->tx_m != NULL, 2094 ("%s: freeing NULL mbuf!", __func__)); 2095 2096 status = le32toh(txd->tx_desc->flags); 2097 if ((status & JME_TD_OWN) == JME_TD_OWN) 2098 break; 2099 2100 /* 2101 * NOTE: 2102 * This chip will always update the TX descriptor's 2103 * buflen field, and this update always happens 2104 * after the OWN bit is cleared, so even if the OWN 2105 * bit has been cleared by the chip, we still can't 2106 * be sure whether the buflen field has been updated 2107 * by the chip or not. To avoid this race, we wait 2108 * for the next TX descriptor's OWN bit to be cleared 2109 * by the chip before reusing this TX descriptor. 2110 */ 2111 next_cons = cons; 2112 JME_DESC_ADD(next_cons, txd->tx_ndesc, 2113 sc->jme_cdata.jme_tx_desc_cnt); 2114 next_txd = &sc->jme_cdata.jme_txdesc[next_cons]; 2115 if (next_txd->tx_m == NULL) 2116 break; 2117 next_status = le32toh(next_txd->tx_desc->flags); 2118 if ((next_status & JME_TD_OWN) == JME_TD_OWN) 2119 break; 2120 2121 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) { 2122 ifp->if_oerrors++; 2123 } else { 2124 ifp->if_opackets++; 2125 if (status & JME_TD_COLLISION) { 2126 ifp->if_collisions += 2127 le32toh(txd->tx_desc->buflen) & 2128 JME_TD_BUF_LEN_MASK; 2129 } 2130 } 2131 2132 /* 2133 * Only the first descriptor of a multi-descriptor 2134 * transmission is updated, so the driver has to skip 2135 * the entire chain of buffers for the transmitted 2136 * frame. In other words, the JME_TD_OWN bit is valid 2137 * only in the first descriptor of a multi-descriptor 2138 * transmission. */ 2139 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 2140 sc->jme_cdata.jme_tx_ring[cons].flags = 0; 2141 JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt); 2142 } 2143 2144 /* Reclaim transferred mbufs. */ 2145 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); 2146 m_freem(txd->tx_m); 2147 txd->tx_m = NULL; 2148 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc; 2149 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0, 2150 ("%s: Active Tx desc counter was garbled", __func__)); 2151 txd->tx_ndesc = 0; 2152 } 2153 sc->jme_cdata.jme_tx_cons = cons; 2154 2155 /* 1 for symbol TX descriptor */ 2156 if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1) 2157 ifp->if_timer = 0; 2158 2159 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <= 2160 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) 2161 ifp->if_flags &= ~IFF_OACTIVE; 2162 } 2163 2164 static __inline void 2165 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count) 2166 { 2167 int i; 2168 2169 for (i = 0; i < count; ++i) { 2170 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]); 2171 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt); 2172 } 2173 } 2174 2175 static __inline struct pktinfo * 2176 jme_pktinfo(struct pktinfo *pi, uint32_t flags) 2177 { 2178 if (flags & JME_RD_IPV4) 2179 pi->pi_netisr = NETISR_IP; 2180 else if (flags & JME_RD_IPV6) 2181 pi->pi_netisr = NETISR_IPV6; 2182 else 2183 return NULL; 2184 2185 pi->pi_flags = 0; 2186 pi->pi_l3proto = IPPROTO_UNKNOWN; 2187 2188 if (flags & JME_RD_MORE_FRAG) 2189 pi->pi_flags |= PKTINFO_FLAG_FRAG; 2190 else if (flags & JME_RD_TCP) 2191 pi->pi_l3proto = IPPROTO_TCP; 2192 else if (flags & JME_RD_UDP) 2193 pi->pi_l3proto = IPPROTO_UDP; 2194 else 2195 pi = NULL; 2196 return pi; 2197 } 2198 2199 /* Receive a frame. */ 2200 static void 2201 jme_rxpkt(struct jme_rxdata *rdata) 2202 { 2203 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if; 2204 struct jme_desc *desc; 2205 struct jme_rxdesc *rxd; 2206 struct mbuf *mp, *m; 2207 uint32_t flags, status, hash, hashinfo; 2208 int cons, count, nsegs; 2209 2210 cons = rdata->jme_rx_cons; 2211 desc = &rdata->jme_rx_ring[cons]; 2212 2213 flags = le32toh(desc->flags); 2214 status = le32toh(desc->buflen); 2215 hash = le32toh(desc->addr_hi); 2216 hashinfo = le32toh(desc->addr_lo); 2217 nsegs = JME_RX_NSEGS(status); 2218 2219 if (nsegs > 1) { 2220 /* Skip the first descriptor. */ 2221 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt); 2222 2223 /* 2224 * Clear the OWN bit of the following RX descriptors; 2225 * the hardware will not clear the OWN bit of any 2226 * RX descriptor except the first. 2227 * 2228 * Since the first RX descriptor is set up, i.e. has its 2229 * OWN bit on, before its following RX descriptors, leaving 2230 * the OWN bit on in the following RX descriptors would trick 2231 * the hardware into thinking that the following RX 2232 * descriptors are ready to be used too.
2233 */ 2234 for (count = 1; count < nsegs; count++, 2235 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) 2236 rdata->jme_rx_ring[cons].flags = 0; 2237 2238 cons = rdata->jme_rx_cons; 2239 } 2240 2241 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, " 2242 "hash 0x%08x, hash info 0x%08x\n", 2243 rdata->jme_rx_idx, flags, hash, hashinfo); 2244 2245 if (status & JME_RX_ERR_STAT) { 2246 ifp->if_ierrors++; 2247 jme_discard_rxbufs(rdata, cons, nsegs); 2248 #ifdef JME_SHOW_ERRORS 2249 if_printf(ifp, "%s : receive error = 0x%b\n", 2250 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); 2251 #endif 2252 rdata->jme_rx_cons += nsegs; 2253 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt; 2254 return; 2255 } 2256 2257 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 2258 for (count = 0; count < nsegs; count++, 2259 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) { 2260 rxd = &rdata->jme_rxdesc[cons]; 2261 mp = rxd->rx_m; 2262 2263 /* Add a new receive buffer to the ring. */ 2264 if (jme_newbuf(rdata, rxd, 0) != 0) { 2265 ifp->if_iqdrops++; 2266 /* Reuse buffer. */ 2267 jme_discard_rxbufs(rdata, cons, nsegs - count); 2268 if (rdata->jme_rxhead != NULL) { 2269 m_freem(rdata->jme_rxhead); 2270 JME_RXCHAIN_RESET(rdata); 2271 } 2272 break; 2273 } 2274 2275 /* 2276 * Assume we've received a full sized frame. 2277 * Actual size is fixed when we encounter the end of 2278 * multi-segmented frame. 2279 */ 2280 mp->m_len = MCLBYTES; 2281 2282 /* Chain received mbufs. */ 2283 if (rdata->jme_rxhead == NULL) { 2284 rdata->jme_rxhead = mp; 2285 rdata->jme_rxtail = mp; 2286 } else { 2287 /* 2288 * Receive processor can receive a maximum frame 2289 * size of 65535 bytes. 2290 */ 2291 rdata->jme_rxtail->m_next = mp; 2292 rdata->jme_rxtail = mp; 2293 } 2294 2295 if (count == nsegs - 1) { 2296 struct pktinfo pi0, *pi; 2297 2298 /* Last desc. for this frame. */ 2299 m = rdata->jme_rxhead; 2300 m->m_pkthdr.len = rdata->jme_rxlen; 2301 if (nsegs > 1) { 2302 /* Set first mbuf size. */ 2303 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 2304 /* Set last mbuf size. */ 2305 mp->m_len = rdata->jme_rxlen - 2306 ((MCLBYTES - JME_RX_PAD_BYTES) + 2307 (MCLBYTES * (nsegs - 2))); 2308 } else { 2309 m->m_len = rdata->jme_rxlen; 2310 } 2311 m->m_pkthdr.rcvif = ifp; 2312 2313 /* 2314 * Account for 10bytes auto padding which is used 2315 * to align IP header on 32bit boundary. Also note, 2316 * CRC bytes is automatically removed by the 2317 * hardware. 2318 */ 2319 m->m_data += JME_RX_PAD_BYTES; 2320 2321 /* Set checksum information. */ 2322 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2323 (flags & JME_RD_IPV4)) { 2324 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2325 if (flags & JME_RD_IPCSUM) 2326 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2327 if ((flags & JME_RD_MORE_FRAG) == 0 && 2328 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 2329 (JME_RD_TCP | JME_RD_TCPCSUM) || 2330 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 2331 (JME_RD_UDP | JME_RD_UDPCSUM))) { 2332 m->m_pkthdr.csum_flags |= 2333 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2334 m->m_pkthdr.csum_data = 0xffff; 2335 } 2336 } 2337 2338 /* Check for VLAN tagged packets. 
2338 /* Check for VLAN tagged packets. */ 2339 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) && 2340 (flags & JME_RD_VLAN_TAG)) { 2341 m->m_pkthdr.ether_vlantag = 2342 flags & JME_RD_VLAN_MASK; 2343 m->m_flags |= M_VLANTAG; 2344 } 2345 2346 ifp->if_ipackets++; 2347 2348 if (ifp->if_capenable & IFCAP_RSS) 2349 pi = jme_pktinfo(&pi0, flags); 2350 else 2351 pi = NULL; 2352 2353 if (pi != NULL && 2354 (hashinfo & JME_RD_HASH_FN_MASK) == 2355 JME_RD_HASH_FN_TOEPLITZ) { 2356 m->m_flags |= (M_HASH | M_CKHASH); 2357 m->m_pkthdr.hash = toeplitz_hash(hash); 2358 } 2359 2360 #ifdef JME_RSS_DEBUG 2361 if (pi != NULL) { 2362 JME_RSS_DPRINTF(rdata->jme_sc, 10, 2363 "isr %d flags %08x, l3 %d %s\n", 2364 pi->pi_netisr, pi->pi_flags, 2365 pi->pi_l3proto, 2366 (m->m_flags & M_HASH) ? "hash" : ""); 2367 } 2368 #endif 2369 2370 /* Pass it on. */ 2371 ether_input_pkt(ifp, m, pi); 2372 2373 /* Reset mbuf chains. */ 2374 JME_RXCHAIN_RESET(rdata); 2375 #ifdef JME_RSS_DEBUG 2376 rdata->jme_rx_pkt++; 2377 #endif 2378 } 2379 } 2380 2381 rdata->jme_rx_cons += nsegs; 2382 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt; 2383 } 2384 2385 static void 2386 jme_rxeof(struct jme_rxdata *rdata, int count) 2387 { 2388 struct jme_desc *desc; 2389 int nsegs, pktlen; 2390 2391 for (;;) { 2392 #ifdef DEVICE_POLLING 2393 if (count >= 0 && count-- == 0) 2394 break; 2395 #endif 2396 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons]; 2397 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2398 break; 2399 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2400 break; 2401 2402 /* 2403 * Check the number of segments against the received bytes. 2404 * A non-matching value would indicate that the hardware 2405 * is still updating the Rx descriptors. I'm not 2406 * sure whether this check is needed. 2407 */ 2408 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2409 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2410 if (nsegs != howmany(pktlen, MCLBYTES)) { 2411 if_printf(&rdata->jme_sc->arpcom.ac_if, 2412 "RX fragment count(%d) and " 2413 "packet size(%d) mismatch\n", nsegs, pktlen); 2414 break; 2415 } 2416 2417 /* 2418 * NOTE: 2419 * RSS hash and hash information may _not_ be set by the 2420 * hardware even if the OWN bit is cleared and VALID bit 2421 * is set. 2422 * 2423 * If the RSS information is not delivered by the hardware 2424 * yet, we MUST NOT accept this packet, let alone reuse 2425 * its RX descriptor. If this packet was accepted and its 2426 * RX descriptor was reused before the hardware delivered 2427 * the RSS information, the RX buffer's address would be 2428 * trashed by the RSS information delivered by the hardware. 2429 */ 2430 if (JME_ENABLE_HWRSS(rdata->jme_sc)) { 2431 struct jme_rxdesc *rxd; 2432 uint32_t hashinfo; 2433 2434 hashinfo = le32toh(desc->addr_lo); 2435 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons]; 2436 2437 /* 2438 * This test should be enough to detect the pending 2439 * RSS information delivery, given: 2440 * - If RSS hash is not calculated, the hashinfo 2441 * will be 0. However, the lower 32bits of RX 2442 * buffers' physical address will never be 0. 2443 * (see jme_rxbuf_dma_filter) 2444 * - If RSS hash is calculated, the lowest 4 bits 2445 * of hashinfo will be set, while the RX buffers 2446 * are at least 2K aligned. 2447 */ 2448 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) { 2449 #ifdef JME_SHOW_RSSWB 2450 if_printf(&rdata->jme_sc->arpcom.ac_if, 2451 "RSS is not written back yet\n"); 2452 #endif 2453 break; 2454 } 2455 } 2456 2457 /* Received a frame.
*/ 2458 jme_rxpkt(rdata); 2459 } 2460 } 2461 2462 static void 2463 jme_tick(void *xsc) 2464 { 2465 struct jme_softc *sc = xsc; 2466 struct mii_data *mii = device_get_softc(sc->jme_miibus); 2467 2468 lwkt_serialize_enter(&sc->jme_serialize); 2469 2470 sc->jme_in_tick = TRUE; 2471 mii_tick(mii); 2472 sc->jme_in_tick = FALSE; 2473 2474 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2475 2476 lwkt_serialize_exit(&sc->jme_serialize); 2477 } 2478 2479 static void 2480 jme_reset(struct jme_softc *sc) 2481 { 2482 uint32_t val; 2483 2484 /* Make sure that TX and RX are stopped */ 2485 jme_stop_tx(sc); 2486 jme_stop_rx(sc); 2487 2488 /* Start reset */ 2489 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2490 DELAY(20); 2491 2492 /* 2493 * Hold the reset bit before stopping the reset 2494 */ 2495 2496 /* Disable TXMAC and TXOFL clock sources */ 2497 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2498 /* Disable RXMAC clock source */ 2499 val = CSR_READ_4(sc, JME_GPREG1); 2500 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2501 /* Flush */ 2502 CSR_READ_4(sc, JME_GHC); 2503 2504 /* Stop reset */ 2505 CSR_WRITE_4(sc, JME_GHC, 0); 2506 /* Flush */ 2507 CSR_READ_4(sc, JME_GHC); 2508 2509 /* 2510 * Clear the reset bit after stopping the reset 2511 */ 2512 2513 /* Enable TXMAC and TXOFL clock sources */ 2514 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2515 /* Enable RXMAC clock source */ 2516 val = CSR_READ_4(sc, JME_GPREG1); 2517 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2518 /* Flush */ 2519 CSR_READ_4(sc, JME_GHC); 2520 2521 /* Disable TXMAC and TXOFL clock sources */ 2522 CSR_WRITE_4(sc, JME_GHC, 0); 2523 /* Disable RXMAC clock source */ 2524 val = CSR_READ_4(sc, JME_GPREG1); 2525 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2526 /* Flush */ 2527 CSR_READ_4(sc, JME_GHC); 2528 2529 /* Enable TX and RX */ 2530 val = CSR_READ_4(sc, JME_TXCSR); 2531 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB); 2532 val = CSR_READ_4(sc, JME_RXCSR); 2533 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB); 2534 /* Flush */ 2535 CSR_READ_4(sc, JME_TXCSR); 2536 CSR_READ_4(sc, JME_RXCSR); 2537 2538 /* Enable TXMAC and TXOFL clock sources */ 2539 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2540 /* Enable RXMAC clock source */ 2541 val = CSR_READ_4(sc, JME_GPREG1); 2542 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2543 /* Flush */ 2544 CSR_READ_4(sc, JME_GHC); 2545 2546 /* Stop TX and RX */ 2547 jme_stop_tx(sc); 2548 jme_stop_rx(sc); 2549 } 2550 2551 static void 2552 jme_init(void *xsc) 2553 { 2554 struct jme_softc *sc = xsc; 2555 struct ifnet *ifp = &sc->arpcom.ac_if; 2556 struct mii_data *mii; 2557 uint8_t eaddr[ETHER_ADDR_LEN]; 2558 bus_addr_t paddr; 2559 uint32_t reg; 2560 int error, r; 2561 2562 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2563 2564 /* 2565 * Cancel any pending I/O. 2566 */ 2567 jme_stop(sc); 2568 2569 /* 2570 * Reset the chip to a known state.
*/ 2572 jme_reset(sc); 2573 2574 /* 2575 * Set up the MSI/MSI-X vector to interrupt mapping. 2576 */ 2577 jme_set_msinum(sc); 2578 2579 if (JME_ENABLE_HWRSS(sc)) 2580 jme_enable_rss(sc); 2581 else 2582 jme_disable_rss(sc); 2583 2584 /* Init RX descriptors */ 2585 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2586 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]); 2587 if (error) { 2588 if_printf(ifp, "initialization failed: " 2589 "no memory for %dth RX ring.\n", r); 2590 jme_stop(sc); 2591 return; 2592 } 2593 } 2594 2595 /* Init TX descriptors */ 2596 jme_init_tx_ring(sc); 2597 2598 /* Initialize shadow status block. */ 2599 jme_init_ssb(sc); 2600 2601 /* Reprogram the station address. */ 2602 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2603 CSR_WRITE_4(sc, JME_PAR0, 2604 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 2605 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 2606 2607 /* 2608 * Configure Tx queue. 2609 * Tx priority queue weight value : 0 2610 * Tx FIFO threshold for processing next packet : 16QW 2611 * Maximum Tx DMA length : 512 2612 * Allow Tx DMA burst. 2613 */ 2614 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2615 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2616 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2617 sc->jme_txcsr |= sc->jme_tx_dma_size; 2618 sc->jme_txcsr |= TXCSR_DMA_BURST; 2619 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2620 2621 /* Set Tx descriptor counter. */ 2622 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt); 2623 2624 /* Set Tx ring address to the hardware. */ 2625 paddr = sc->jme_cdata.jme_tx_ring_paddr; 2626 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2627 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2628 2629 /* Configure TxMAC parameters. */ 2630 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 2631 reg |= TXMAC_THRESH_1_PKT; 2632 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 2633 CSR_WRITE_4(sc, JME_TXMAC, reg); 2634 2635 /* 2636 * Configure Rx queue. 2637 * FIFO full threshold for transmitting Tx pause packet : 128T 2638 * FIFO threshold for processing next packet : 128QW 2639 * Rx queue 0 select 2640 * Max Rx DMA length : 128 2641 * Rx descriptor retry : 32 2642 * Rx descriptor retry time gap : 256ns 2643 * Don't receive runt/bad frame. 2644 */ 2645 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 2646 #if 0 2647 /* 2648 * Since Rx FIFO size is 4K bytes, receiving frames larger 2649 * than 4K bytes will suffer from Rx FIFO overruns. So 2650 * decrease FIFO threshold to reduce the FIFO overruns for 2651 * frames larger than 4000 bytes. 2652 * For best performance of standard MTU sized frames use 2653 * maximum allowable FIFO threshold, 128QW. 2654 */ 2655 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) > 2656 JME_RX_FIFO_SIZE) 2657 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2658 else 2659 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 2660 #else 2661 /* Improve PCI Express compatibility */ 2662 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2663 #endif 2664 sc->jme_rxcsr |= sc->jme_rx_dma_size; 2665 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 2666 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 2667 /* XXX TODO DROP_BAD */ 2668 2669 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2670 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 2671 2672 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 2673
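/*
 * Note: RXCSR_RXQ_N_SEL(r) appears to select RX queue r, so the
 * RXQDC and RXDBA writes that follow are applied to that queue's
 * register window.
 */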
2674 /* Set Rx descriptor counter. */ 2675 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt); 2676 2677 /* Set Rx ring address to the hardware. */ 2678 paddr = rdata->jme_rx_ring_paddr; 2679 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2680 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2681 } 2682 2683 /* Clear receive filter. */ 2684 CSR_WRITE_4(sc, JME_RXMAC, 0); 2685 2686 /* Set up the receive filter. */ 2687 jme_set_filter(sc); 2688 jme_set_vlan(sc); 2689 2690 /* 2691 * Disable all WOL bits as WOL can interfere with normal Rx 2692 * operation. Also clear WOL detection status bits. 2693 */ 2694 reg = CSR_READ_4(sc, JME_PMCS); 2695 reg &= ~PMCS_WOL_ENB_MASK; 2696 CSR_WRITE_4(sc, JME_PMCS, reg); 2697 2698 /* 2699 * Pad 10 bytes right before the received frame. This will greatly 2700 * help Rx performance on strict-alignment architectures as 2701 * it does not need to copy the frame to align the payload. 2702 */ 2703 reg = CSR_READ_4(sc, JME_RXMAC); 2704 reg |= RXMAC_PAD_10BYTES; 2705 2706 if (ifp->if_capenable & IFCAP_RXCSUM) 2707 reg |= RXMAC_CSUM_ENB; 2708 CSR_WRITE_4(sc, JME_RXMAC, reg); 2709 2710 /* Configure general purpose reg0 */ 2711 reg = CSR_READ_4(sc, JME_GPREG0); 2712 reg &= ~GPREG0_PCC_UNIT_MASK; 2713 /* Set PCC timer resolution to microsecond units. */ 2714 reg |= GPREG0_PCC_UNIT_US; 2715 /* 2716 * Disable all shadow register posting as we have to read 2717 * the JME_INTR_STATUS register in jme_intr(). Also it seems 2718 * that it's hard to synchronize interrupt status between 2719 * hardware and software with shadow posting due to 2720 * requirements of bus_dmamap_sync(9). 2721 */ 2722 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2723 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2724 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2725 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2726 /* Disable posting of DW0. */ 2727 reg &= ~GPREG0_POST_DW0_ENB; 2728 /* Clear PME message. */ 2729 reg &= ~GPREG0_PME_ENB; 2730 /* Set PHY address. */ 2731 reg &= ~GPREG0_PHY_ADDR_MASK; 2732 reg |= sc->jme_phyaddr; 2733 CSR_WRITE_4(sc, JME_GPREG0, reg); 2734 2735 /* Configure Tx queue 0 packet completion coalescing. */ 2736 jme_set_tx_coal(sc); 2737 2738 /* Configure Rx queues packet completion coalescing. */ 2739 jme_set_rx_coal(sc); 2740 2741 /* Configure shadow status block but don't enable posting. */ 2742 paddr = sc->jme_cdata.jme_ssb_block_paddr; 2743 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2744 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2745 2746 /* Disable Timer 1 and Timer 2. */ 2747 CSR_WRITE_4(sc, JME_TIMER1, 0); 2748 CSR_WRITE_4(sc, JME_TIMER2, 0); 2749 2750 /* Configure retry transmit period, retry limit value. */ 2751 CSR_WRITE_4(sc, JME_TXTRHD, 2752 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2753 TXTRHD_RT_PERIOD_MASK) | 2754 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2755 TXTRHD_RT_LIMIT_MASK)); 2756 2757 #ifdef DEVICE_POLLING 2758 if (!(ifp->if_flags & IFF_POLLING)) 2759 #endif 2760 /* Initialize the interrupt mask. */ 2761 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2762 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2763 2764 /* 2765 * Enabling Tx/Rx DMA engines and Rx queue processing is 2766 * done after detection of valid link in jme_miibus_statchg. 2767 */ 2768 sc->jme_has_link = FALSE; 2769 2770 /* Set the current media.
*/ 2771 mii = device_get_softc(sc->jme_miibus); 2772 mii_mediachg(mii); 2773 2774 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2775 2776 ifp->if_flags |= IFF_RUNNING; 2777 ifp->if_flags &= ~IFF_OACTIVE; 2778 } 2779 2780 static void 2781 jme_stop(struct jme_softc *sc) 2782 { 2783 struct ifnet *ifp = &sc->arpcom.ac_if; 2784 struct jme_txdesc *txd; 2785 struct jme_rxdesc *rxd; 2786 struct jme_rxdata *rdata; 2787 int i, r; 2788 2789 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2790 2791 /* 2792 * Mark the interface down and cancel the watchdog timer. 2793 */ 2794 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2795 ifp->if_timer = 0; 2796 2797 callout_stop(&sc->jme_tick_ch); 2798 sc->jme_has_link = FALSE; 2799 2800 /* 2801 * Disable interrupts. 2802 */ 2803 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2804 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2805 2806 /* Disable updating shadow status block. */ 2807 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 2808 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 2809 2810 /* Stop receiver, transmitter. */ 2811 jme_stop_rx(sc); 2812 jme_stop_tx(sc); 2813 2814 /* 2815 * Free partially finished RX segments 2816 */ 2817 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2818 rdata = &sc->jme_cdata.jme_rx_data[r]; 2819 if (rdata->jme_rxhead != NULL) 2820 m_freem(rdata->jme_rxhead); 2821 JME_RXCHAIN_RESET(rdata); 2822 } 2823 2824 /* 2825 * Free RX and TX mbufs still in the queues. 2826 */ 2827 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 2828 rdata = &sc->jme_cdata.jme_rx_data[r]; 2829 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 2830 rxd = &rdata->jme_rxdesc[i]; 2831 if (rxd->rx_m != NULL) { 2832 bus_dmamap_unload(rdata->jme_rx_tag, 2833 rxd->rx_dmamap); 2834 m_freem(rxd->rx_m); 2835 rxd->rx_m = NULL; 2836 } 2837 } 2838 } 2839 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) { 2840 txd = &sc->jme_cdata.jme_txdesc[i]; 2841 if (txd->tx_m != NULL) { 2842 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, 2843 txd->tx_dmamap); 2844 m_freem(txd->tx_m); 2845 txd->tx_m = NULL; 2846 txd->tx_ndesc = 0; 2847 } 2848 } 2849 } 2850 2851 static void 2852 jme_stop_tx(struct jme_softc *sc) 2853 { 2854 uint32_t reg; 2855 int i; 2856 2857 reg = CSR_READ_4(sc, JME_TXCSR); 2858 if ((reg & TXCSR_TX_ENB) == 0) 2859 return; 2860 reg &= ~TXCSR_TX_ENB; 2861 CSR_WRITE_4(sc, JME_TXCSR, reg); 2862 for (i = JME_TIMEOUT; i > 0; i--) { 2863 DELAY(1); 2864 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2865 break; 2866 } 2867 if (i == 0) 2868 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2869 } 2870 2871 static void 2872 jme_stop_rx(struct jme_softc *sc) 2873 { 2874 uint32_t reg; 2875 int i; 2876 2877 reg = CSR_READ_4(sc, JME_RXCSR); 2878 if ((reg & RXCSR_RX_ENB) == 0) 2879 return; 2880 reg &= ~RXCSR_RX_ENB; 2881 CSR_WRITE_4(sc, JME_RXCSR, reg); 2882 for (i = JME_TIMEOUT; i > 0; i--) { 2883 DELAY(1); 2884 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 2885 break; 2886 } 2887 if (i == 0) 2888 device_printf(sc->jme_dev, "stopping receiver timeout!\n"); 2889 } 2890 2891 static void 2892 jme_init_tx_ring(struct jme_softc *sc) 2893 { 2894 struct jme_chain_data *cd; 2895 struct jme_txdesc *txd; 2896 int i; 2897 2898 sc->jme_cdata.jme_tx_prod = 0; 2899 sc->jme_cdata.jme_tx_cons = 0; 2900 sc->jme_cdata.jme_tx_cnt = 0; 2901 2902 cd = &sc->jme_cdata; 2903 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc)); 2904 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) { 2905 txd = &sc->jme_cdata.jme_txdesc[i]; 2906 txd->tx_m = NULL; 2907 txd->tx_desc = &cd->jme_tx_ring[i];
txd->tx_ndesc = 0; 2909 } 2910 } 2911 2912 static void 2913 jme_init_ssb(struct jme_softc *sc) 2914 { 2915 struct jme_chain_data *cd; 2916 2917 cd = &sc->jme_cdata; 2918 bzero(cd->jme_ssb_block, JME_SSB_SIZE); 2919 } 2920 2921 static int 2922 jme_init_rx_ring(struct jme_rxdata *rdata) 2923 { 2924 struct jme_rxdesc *rxd; 2925 int i; 2926 2927 KKASSERT(rdata->jme_rxhead == NULL && 2928 rdata->jme_rxtail == NULL && 2929 rdata->jme_rxlen == 0); 2930 rdata->jme_rx_cons = 0; 2931 2932 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata)); 2933 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 2934 int error; 2935 2936 rxd = &rdata->jme_rxdesc[i]; 2937 rxd->rx_m = NULL; 2938 rxd->rx_desc = &rdata->jme_rx_ring[i]; 2939 error = jme_newbuf(rdata, rxd, 1); 2940 if (error) 2941 return error; 2942 } 2943 return 0; 2944 } 2945 2946 static int 2947 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init) 2948 { 2949 struct mbuf *m; 2950 bus_dma_segment_t segs; 2951 bus_dmamap_t map; 2952 int error, nsegs; 2953 2954 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2955 if (m == NULL) 2956 return ENOBUFS; 2957 /* 2958 * The JMC250 has a 64bit boundary alignment limitation, so 2959 * jme(4) takes advantage of the hardware's 10 byte padding 2960 * feature to avoid copying the entire frame just to align 2961 * the IP header on a 32bit boundary. 2962 */ 2963 m->m_len = m->m_pkthdr.len = MCLBYTES; 2964 2965 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag, 2966 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs, 2967 BUS_DMA_NOWAIT); 2968 if (error) { 2969 m_freem(m); 2970 if (init) { 2971 if_printf(&rdata->jme_sc->arpcom.ac_if, 2972 "can't load RX mbuf\n"); 2973 } 2974 return error; 2975 } 2976 2977 if (rxd->rx_m != NULL) { 2978 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap, 2979 BUS_DMASYNC_POSTREAD); 2980 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap); 2981 } 2982 map = rxd->rx_dmamap; 2983 rxd->rx_dmamap = rdata->jme_rx_sparemap; 2984 rdata->jme_rx_sparemap = map; 2985 rxd->rx_m = m; 2986 rxd->rx_paddr = segs.ds_addr; 2987 2988 jme_setup_rxdesc(rxd); 2989 return 0; 2990 } 2991 2992 static void 2993 jme_set_vlan(struct jme_softc *sc) 2994 { 2995 struct ifnet *ifp = &sc->arpcom.ac_if; 2996 uint32_t reg; 2997 2998 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2999 3000 reg = CSR_READ_4(sc, JME_RXMAC); 3001 reg &= ~RXMAC_VLAN_ENB; 3002 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 3003 reg |= RXMAC_VLAN_ENB; 3004 CSR_WRITE_4(sc, JME_RXMAC, reg); 3005 } 3006 3007 static void 3008 jme_set_filter(struct jme_softc *sc) 3009 { 3010 struct ifnet *ifp = &sc->arpcom.ac_if; 3011 struct ifmultiaddr *ifma; 3012 uint32_t crc; 3013 uint32_t mchash[2]; 3014 uint32_t rxcfg; 3015 3016 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3017 3018 rxcfg = CSR_READ_4(sc, JME_RXMAC); 3019 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 3020 RXMAC_ALLMULTI); 3021 3022 /* 3023 * Always accept frames destined to our station address. 3024 * Always accept broadcast frames.
3025 */ 3026 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 3027 3028 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 3029 if (ifp->if_flags & IFF_PROMISC) 3030 rxcfg |= RXMAC_PROMISC; 3031 if (ifp->if_flags & IFF_ALLMULTI) 3032 rxcfg |= RXMAC_ALLMULTI; 3033 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3034 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3035 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3036 return; 3037 } 3038 3039 /* 3040 * Set up the multicast address filter by passing all multicast 3041 * addresses through a CRC generator, and then using the low-order 3042 * 6 bits as an index into the 64 bit multicast hash table. The 3043 * high order bits select the register, while the rest of the bits 3044 * select the bit within the register. 3045 */ 3046 rxcfg |= RXMAC_MULTICAST; 3047 bzero(mchash, sizeof(mchash)); 3048 3049 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3050 if (ifma->ifma_addr->sa_family != AF_LINK) 3051 continue; 3052 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3053 ifma->ifma_addr), ETHER_ADDR_LEN); 3054 3055 /* Just want the 6 least significant bits. */ 3056 crc &= 0x3f; 3057 3058 /* Set the corresponding bit in the hash table. */ 3059 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3060 } 3061 3062 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3063 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3064 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3065 } 3066 3067 static int 3068 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 3069 { 3070 struct jme_softc *sc = arg1; 3071 struct ifnet *ifp = &sc->arpcom.ac_if; 3072 int error, v; 3073 3074 ifnet_serialize_all(ifp); 3075 3076 v = sc->jme_tx_coal_to; 3077 error = sysctl_handle_int(oidp, &v, 0, req); 3078 if (error || req->newptr == NULL) 3079 goto back; 3080 3081 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 3082 error = EINVAL; 3083 goto back; 3084 } 3085 3086 if (v != sc->jme_tx_coal_to) { 3087 sc->jme_tx_coal_to = v; 3088 if (ifp->if_flags & IFF_RUNNING) 3089 jme_set_tx_coal(sc); 3090 } 3091 back: 3092 ifnet_deserialize_all(ifp); 3093 return error; 3094 } 3095 3096 static int 3097 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 3098 { 3099 struct jme_softc *sc = arg1; 3100 struct ifnet *ifp = &sc->arpcom.ac_if; 3101 int error, v; 3102 3103 ifnet_serialize_all(ifp); 3104 3105 v = sc->jme_tx_coal_pkt; 3106 error = sysctl_handle_int(oidp, &v, 0, req); 3107 if (error || req->newptr == NULL) 3108 goto back; 3109 3110 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 3111 error = EINVAL; 3112 goto back; 3113 } 3114 3115 if (v != sc->jme_tx_coal_pkt) { 3116 sc->jme_tx_coal_pkt = v; 3117 if (ifp->if_flags & IFF_RUNNING) 3118 jme_set_tx_coal(sc); 3119 } 3120 back: 3121 ifnet_deserialize_all(ifp); 3122 return error; 3123 } 3124 3125 static int 3126 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 3127 { 3128 struct jme_softc *sc = arg1; 3129 struct ifnet *ifp = &sc->arpcom.ac_if; 3130 int error, v; 3131 3132 ifnet_serialize_all(ifp); 3133 3134 v = sc->jme_rx_coal_to; 3135 error = sysctl_handle_int(oidp, &v, 0, req); 3136 if (error || req->newptr == NULL) 3137 goto back; 3138 3139 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 3140 error = EINVAL; 3141 goto back; 3142 } 3143 3144 if (v != sc->jme_rx_coal_to) { 3145 sc->jme_rx_coal_to = v; 3146 if (ifp->if_flags & IFF_RUNNING) 3147 jme_set_rx_coal(sc); 3148 } 3149 back: 3150 ifnet_deserialize_all(ifp); 3151 return error; 3152 } 3153 3154 static int 3155 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 3156 { 3157 struct jme_softc *sc = arg1; 3158 struct ifnet *ifp = &sc->arpcom.ac_if; 3159 int error, v; 3160 3161 
ifnet_serialize_all(ifp); 3162 3163 v = sc->jme_rx_coal_pkt; 3164 error = sysctl_handle_int(oidp, &v, 0, req); 3165 if (error || req->newptr == NULL) 3166 goto back; 3167 3168 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) { 3169 error = EINVAL; 3170 goto back; 3171 } 3172 3173 if (v != sc->jme_rx_coal_pkt) { 3174 sc->jme_rx_coal_pkt = v; 3175 if (ifp->if_flags & IFF_RUNNING) 3176 jme_set_rx_coal(sc); 3177 } 3178 back: 3179 ifnet_deserialize_all(ifp); 3180 return error; 3181 } 3182 3183 static void 3184 jme_set_tx_coal(struct jme_softc *sc) 3185 { 3186 uint32_t reg; 3187 3188 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 3189 PCCTX_COAL_TO_MASK; 3190 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 3191 PCCTX_COAL_PKT_MASK; 3192 reg |= PCCTX_COAL_TXQ0; 3193 CSR_WRITE_4(sc, JME_PCCTX, reg); 3194 } 3195 3196 static void 3197 jme_set_rx_coal(struct jme_softc *sc) 3198 { 3199 uint32_t reg; 3200 int r; 3201 3202 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 3203 PCCRX_COAL_TO_MASK; 3204 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 3205 PCCRX_COAL_PKT_MASK; 3206 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) 3207 CSR_WRITE_4(sc, JME_PCCRX(r), reg); 3208 } 3209 3210 #ifdef DEVICE_POLLING 3211 3212 static void 3213 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3214 { 3215 struct jme_softc *sc = ifp->if_softc; 3216 uint32_t status; 3217 int r; 3218 3219 ASSERT_SERIALIZED(&sc->jme_serialize); 3220 3221 switch (cmd) { 3222 case POLL_REGISTER: 3223 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 3224 break; 3225 3226 case POLL_DEREGISTER: 3227 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 3228 break; 3229 3230 case POLL_AND_CHECK_STATUS: 3231 case POLL_ONLY: 3232 status = CSR_READ_4(sc, JME_INTR_STATUS); 3233 3234 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3235 struct jme_rxdata *rdata = 3236 &sc->jme_cdata.jme_rx_data[r]; 3237 3238 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3239 jme_rxeof(rdata, count); 3240 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3241 } 3242 3243 if (status & INTR_RXQ_DESC_EMPTY) { 3244 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 3245 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 3246 RXCSR_RX_ENB | RXCSR_RXQ_START); 3247 } 3248 3249 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize); 3250 jme_txeof(sc); 3251 if (!ifq_is_empty(&ifp->if_snd)) 3252 if_devstart(ifp); 3253 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize); 3254 break; 3255 } 3256 } 3257 3258 #endif /* DEVICE_POLLING */ 3259 3260 static int 3261 jme_rxring_dma_alloc(struct jme_rxdata *rdata) 3262 { 3263 bus_dmamem_t dmem; 3264 int error, asize; 3265 3266 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN); 3267 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag, 3268 JME_RX_RING_ALIGN, 0, 3269 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3270 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3271 if (error) { 3272 device_printf(rdata->jme_sc->jme_dev, 3273 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx); 3274 return error; 3275 } 3276 rdata->jme_rx_ring_tag = dmem.dmem_tag; 3277 rdata->jme_rx_ring_map = dmem.dmem_map; 3278 rdata->jme_rx_ring = dmem.dmem_addr; 3279 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr; 3280 3281 return 0; 3282 } 3283 3284 static int 3285 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr) 3286 { 3287 if ((paddr & 0xffffffff) == 0) { 3288 /* 3289 * Don't allow the lower 32 bits of an RX buffer's 3290 * physical address to be 0, else it would break the 3291 * detection of pending hardware RSS information 3292 * delivery on the RX path. 3293 */ 3294 return 1; 3295 } 3296 return 0; 3297 } 3298 3299 static int 3300 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata) 3301 { 3302 bus_addr_t lowaddr; 3303 int i, error; 3304 3305 lowaddr = BUS_SPACE_MAXADDR; 3306 if (JME_ENABLE_HWRSS(rdata->jme_sc)) { 3307 /* jme_rxbuf_dma_filter will be called */ 3308 lowaddr = BUS_SPACE_MAXADDR_32BIT; 3309 } 3310 3311 /* Create tag for Rx buffers. */ 3312 error = bus_dma_tag_create( 3313 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */ 3314 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 3315 lowaddr, /* lowaddr */ 3316 BUS_SPACE_MAXADDR, /* highaddr */ 3317 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */ 3318 MCLBYTES, /* maxsize */ 3319 1, /* nsegments */ 3320 MCLBYTES, /* maxsegsize */ 3321 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */ 3322 &rdata->jme_rx_tag); 3323 if (error) { 3324 device_printf(rdata->jme_sc->jme_dev, 3325 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx); 3326 return error; 3327 } 3328 3329 /* Create DMA maps for Rx buffers. */ 3330 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3331 &rdata->jme_rx_sparemap); 3332 if (error) { 3333 device_printf(rdata->jme_sc->jme_dev, 3334 "could not create %dth spare Rx dmamap.\n", 3335 rdata->jme_rx_idx); 3336 bus_dma_tag_destroy(rdata->jme_rx_tag); 3337 rdata->jme_rx_tag = NULL; 3338 return error; 3339 } 3340 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) { 3341 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i]; 3342 3343 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3344 &rxd->rx_dmamap); 3345 if (error) { 3346 int j; 3347 3348 device_printf(rdata->jme_sc->jme_dev, 3349 "could not create %dth Rx dmamap " 3350 "for %dth RX ring.\n", i, rdata->jme_rx_idx); 3351 3352 for (j = 0; j < i; ++j) { 3353 rxd = &rdata->jme_rxdesc[j]; 3354 bus_dmamap_destroy(rdata->jme_rx_tag, 3355 rxd->rx_dmamap); 3356 } 3357 bus_dmamap_destroy(rdata->jme_rx_tag, 3358 rdata->jme_rx_sparemap); 3359 bus_dma_tag_destroy(rdata->jme_rx_tag); 3360 rdata->jme_rx_tag = NULL; 3361 return error; 3362 } 3363 } 3364 return 0; 3365 } 3366 3367 static void 3368 jme_rx_intr(struct jme_softc *sc, uint32_t status) 3369 { 3370 int r; 3371 3372 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3373 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3374 3375 if (status & rdata->jme_rx_coal) { 3376 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3377 jme_rxeof(rdata, -1); 3378 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3379 } 3380 } 3381 } 3382 3383 static void 3384 jme_enable_rss(struct jme_softc *sc) 3385 { 3386 uint32_t rssc, ind; 3387 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE]; 3388 int i; 3389 3390 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 || 3391 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4, 3392 ("%s: invalid # of RX rings (%d)", 3393 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt)); 3394 3395 rssc = RSSC_HASH_64_ENTRY; 3396 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP; 3397 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1; 3398 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc); 3399 CSR_WRITE_4(sc, JME_RSSC, rssc); 3400 3401 toeplitz_get_key(key, sizeof(key)); 3402 for (i = 0; i < RSSKEY_NREGS; ++i) { 3403 uint32_t keyreg; 3404 3405 keyreg = RSSKEY_REGVAL(key, i); 3406 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg); 3407 3408 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg); 3409 } 3410 3411 /* 3412 * Create the redirect table in the following fashion: 3413 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3414 */
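/*
 * Illustrative example (assuming four 8bit entries per 32bit
 * redirect table register): with 4 RX rings every register is
 * programmed to 0x03020100, with 2 RX rings to 0x01000100.
 * Replicating that pattern across all RSSTBL_NREGS registers
 * makes rdr_table[hash & rdr_table_mask] equal to
 * (hash & ring_cnt_mask), as described above.
 */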
3415 ind = 0; 3416 for (i = 0; i < RSSTBL_REGSIZE; ++i) { 3417 int q; 3418 3419 q = i % sc->jme_cdata.jme_rx_ring_cnt; 3420 ind |= q << (i * 8); 3421 } 3422 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind); 3423 3424 for (i = 0; i < RSSTBL_NREGS; ++i) 3425 CSR_WRITE_4(sc, RSSTBL_REG(i), ind); 3426 } 3427 3428 static void 3429 jme_disable_rss(struct jme_softc *sc) 3430 { 3431 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 3432 } 3433 3434 static void 3435 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3436 { 3437 struct jme_softc *sc = ifp->if_softc; 3438 3439 ifnet_serialize_array_enter(sc->jme_serialize_arr, 3440 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz); 3441 } 3442 3443 static void 3444 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3445 { 3446 struct jme_softc *sc = ifp->if_softc; 3447 3448 ifnet_serialize_array_exit(sc->jme_serialize_arr, 3449 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz); 3450 } 3451 3452 static int 3453 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3454 { 3455 struct jme_softc *sc = ifp->if_softc; 3456 3457 return ifnet_serialize_array_try(sc->jme_serialize_arr, 3458 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz); 3459 } 3460 3461 #ifdef INVARIANTS 3462 3463 static void 3464 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3465 boolean_t serialized) 3466 { 3467 struct jme_softc *sc = ifp->if_softc; 3468 3469 ifnet_serialize_array_assert(sc->jme_serialize_arr, 3470 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, 3471 slz, serialized); 3472 } 3473 3474 #endif /* INVARIANTS */ 3475 3476 static void 3477 jme_msix_try_alloc(device_t dev) 3478 { 3479 struct jme_softc *sc = device_get_softc(dev); 3480 struct jme_msix_data *msix; 3481 int error, i, r, msix_enable, msix_count; 3482 int offset, offset_def; 3483 3484 msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt; 3485 KKASSERT(msix_count <= JME_NMSIX); 3486 3487 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable); 3488 3489 /* 3490 * We leave the 1st MSI-X vector unused, so we 3491 * actually need msix_count + 1 MSI-X vectors. 
3492 */ 3493 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1)) 3494 return; 3495 3496 for (i = 0; i < msix_count; ++i) 3497 sc->jme_msix[i].jme_msix_rid = -1; 3498 3499 i = 0; 3500 3501 /* 3502 * Setup TX MSI-X 3503 */ 3504 3505 offset_def = device_get_unit(dev) % ncpus2; 3506 offset = device_getenv_int(dev, "msix.txoff", offset_def); 3507 if (offset >= ncpus2) { 3508 device_printf(dev, "invalid msix.txoff %d, use %d\n", 3509 offset, offset_def); 3510 offset = offset_def; 3511 } 3512 3513 msix = &sc->jme_msix[i++]; 3514 msix->jme_msix_cpuid = offset; 3515 sc->jme_tx_cpuid = msix->jme_msix_cpuid; 3516 msix->jme_msix_arg = &sc->jme_cdata; 3517 msix->jme_msix_func = jme_msix_tx; 3518 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO; 3519 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize; 3520 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx", 3521 device_get_nameunit(dev)); 3522 3523 /* 3524 * Setup RX MSI-X 3525 */ 3526 3527 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) { 3528 offset = 0; 3529 } else { 3530 offset_def = (sc->jme_cdata.jme_rx_ring_cnt * 3531 device_get_unit(dev)) % ncpus2; 3532 3533 offset = device_getenv_int(dev, "msix.rxoff", offset_def); 3534 if (offset >= ncpus2 || 3535 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) { 3536 device_printf(dev, "invalid msix.rxoff %d, use %d\n", 3537 offset, offset_def); 3538 offset = offset_def; 3539 } 3540 } 3541 3542 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) { 3543 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3544 3545 msix = &sc->jme_msix[i++]; 3546 msix->jme_msix_cpuid = r + offset; 3547 KKASSERT(msix->jme_msix_cpuid < ncpus2); 3548 msix->jme_msix_arg = rdata; 3549 msix->jme_msix_func = jme_msix_rx; 3550 msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty; 3551 msix->jme_msix_serialize = &rdata->jme_rx_serialize; 3552 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), 3553 "%s rx%d", device_get_nameunit(dev), r); 3554 } 3555 3556 KKASSERT(i == msix_count); 3557 3558 error = pci_setup_msix(dev); 3559 if (error) 3560 return; 3561 3562 /* Setup jme_msix_cnt early, so we could cleanup */ 3563 sc->jme_msix_cnt = msix_count; 3564 3565 for (i = 0; i < msix_count; ++i) { 3566 msix = &sc->jme_msix[i]; 3567 3568 msix->jme_msix_vector = i + 1; 3569 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector, 3570 &msix->jme_msix_rid, msix->jme_msix_cpuid); 3571 if (error) 3572 goto back; 3573 3574 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3575 &msix->jme_msix_rid, RF_ACTIVE); 3576 if (msix->jme_msix_res == NULL) { 3577 error = ENOMEM; 3578 goto back; 3579 } 3580 } 3581 3582 for (i = 0; i < JME_INTR_CNT; ++i) { 3583 uint32_t intr_mask = (1 << i); 3584 int x; 3585 3586 if ((JME_INTRS & intr_mask) == 0) 3587 continue; 3588 3589 for (x = 0; x < msix_count; ++x) { 3590 msix = &sc->jme_msix[x]; 3591 if (msix->jme_msix_intrs & intr_mask) { 3592 int reg, shift; 3593 3594 reg = i / JME_MSINUM_FACTOR; 3595 KKASSERT(reg < JME_MSINUM_CNT); 3596 3597 shift = (i % JME_MSINUM_FACTOR) * 4; 3598 3599 sc->jme_msinum[reg] |= 3600 (msix->jme_msix_vector << shift); 3601 3602 break; 3603 } 3604 } 3605 } 3606 3607 if (bootverbose) { 3608 for (i = 0; i < JME_MSINUM_CNT; ++i) { 3609 device_printf(dev, "MSINUM%d: %#x\n", i, 3610 sc->jme_msinum[i]); 3611 } 3612 } 3613 3614 pci_enable_msix(dev); 3615 sc->jme_irq_type = PCI_INTR_TYPE_MSIX; 3616 3617 back: 3618 if (error) 3619 jme_msix_free(dev); 3620 } 3621 3622 static int 3623 jme_intr_alloc(device_t dev) 3624 { 3625 struct 
jme_softc *sc = device_get_softc(dev); 3626 u_int irq_flags; 3627 3628 jme_msix_try_alloc(dev); 3629 3630 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3631 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable, 3632 &sc->jme_irq_rid, &irq_flags); 3633 3634 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3635 &sc->jme_irq_rid, irq_flags); 3636 if (sc->jme_irq_res == NULL) { 3637 device_printf(dev, "can't allocate irq\n"); 3638 return ENXIO; 3639 } 3640 } 3641 return 0; 3642 } 3643 3644 static void 3645 jme_msix_free(device_t dev) 3646 { 3647 struct jme_softc *sc = device_get_softc(dev); 3648 int i; 3649 3650 KKASSERT(sc->jme_msix_cnt > 1); 3651 3652 for (i = 0; i < sc->jme_msix_cnt; ++i) { 3653 struct jme_msix_data *msix = &sc->jme_msix[i]; 3654 3655 if (msix->jme_msix_res != NULL) { 3656 bus_release_resource(dev, SYS_RES_IRQ, 3657 msix->jme_msix_rid, msix->jme_msix_res); 3658 msix->jme_msix_res = NULL; 3659 } 3660 if (msix->jme_msix_rid >= 0) { 3661 pci_release_msix_vector(dev, msix->jme_msix_rid); 3662 msix->jme_msix_rid = -1; 3663 } 3664 } 3665 pci_teardown_msix(dev); 3666 } 3667 3668 static void 3669 jme_intr_free(device_t dev) 3670 { 3671 struct jme_softc *sc = device_get_softc(dev); 3672 3673 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3674 if (sc->jme_irq_res != NULL) { 3675 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid, 3676 sc->jme_irq_res); 3677 } 3678 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI) 3679 pci_release_msi(dev); 3680 } else { 3681 jme_msix_free(dev); 3682 } 3683 } 3684 3685 static void 3686 jme_msix_tx(void *xcd) 3687 { 3688 struct jme_chain_data *cd = xcd; 3689 struct jme_softc *sc = cd->jme_sc; 3690 struct ifnet *ifp = &sc->arpcom.ac_if; 3691 3692 ASSERT_SERIALIZED(&cd->jme_tx_serialize); 3693 3694 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO); 3695 3696 CSR_WRITE_4(sc, JME_INTR_STATUS, 3697 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP); 3698 3699 if (ifp->if_flags & IFF_RUNNING) { 3700 jme_txeof(sc); 3701 if (!ifq_is_empty(&ifp->if_snd)) 3702 if_devstart(ifp); 3703 } 3704 3705 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO); 3706 } 3707 3708 static void 3709 jme_msix_rx(void *xrdata) 3710 { 3711 struct jme_rxdata *rdata = xrdata; 3712 struct jme_softc *sc = rdata->jme_sc; 3713 struct ifnet *ifp = &sc->arpcom.ac_if; 3714 uint32_t status; 3715 3716 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3717 3718 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, 3719 (rdata->jme_rx_coal | rdata->jme_rx_empty)); 3720 3721 status = CSR_READ_4(sc, JME_INTR_STATUS); 3722 status &= (rdata->jme_rx_coal | rdata->jme_rx_empty); 3723 3724 if (status & rdata->jme_rx_coal) 3725 status |= (rdata->jme_rx_coal | rdata->jme_rx_comp); 3726 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 3727 3728 if (ifp->if_flags & IFF_RUNNING) { 3729 if (status & rdata->jme_rx_coal) 3730 jme_rxeof(rdata, -1); 3731 3732 if (status & rdata->jme_rx_empty) { 3733 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 3734 RXCSR_RX_ENB | RXCSR_RXQ_START); 3735 rdata->jme_rx_emp++; 3736 } 3737 } 3738 3739 CSR_WRITE_4(sc, JME_INTR_MASK_SET, 3740 (rdata->jme_rx_coal | rdata->jme_rx_empty)); 3741 } 3742 3743 static void 3744 jme_set_msinum(struct jme_softc *sc) 3745 { 3746 int i; 3747 3748 for (i = 0; i < JME_MSINUM_CNT; ++i) 3749 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]); 3750 } 3751 3752 static int 3753 jme_intr_setup(device_t dev) 3754 { 3755 struct jme_softc *sc = device_get_softc(dev); 3756 struct ifnet *ifp = &sc->arpcom.ac_if; 3757 int error; 3758 3759 if 
(sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 3760 return jme_msix_setup(dev); 3761 3762 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, 3763 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize); 3764 if (error) { 3765 device_printf(dev, "could not set up interrupt handler.\n"); 3766 return error; 3767 } 3768 3769 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res); 3770 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 3771 return 0; 3772 } 3773 3774 static void 3775 jme_intr_teardown(device_t dev) 3776 { 3777 struct jme_softc *sc = device_get_softc(dev); 3778 3779 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 3780 jme_msix_teardown(dev, sc->jme_msix_cnt); 3781 else 3782 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle); 3783 } 3784 3785 static int 3786 jme_msix_setup(device_t dev) 3787 { 3788 struct jme_softc *sc = device_get_softc(dev); 3789 struct ifnet *ifp = &sc->arpcom.ac_if; 3790 int x; 3791 3792 for (x = 0; x < sc->jme_msix_cnt; ++x) { 3793 struct jme_msix_data *msix = &sc->jme_msix[x]; 3794 int error; 3795 3796 error = bus_setup_intr_descr(dev, msix->jme_msix_res, 3797 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg, 3798 &msix->jme_msix_handle, msix->jme_msix_serialize, 3799 msix->jme_msix_desc); 3800 if (error) { 3801 device_printf(dev, "could not set up %s " 3802 "interrupt handler.\n", msix->jme_msix_desc); 3803 jme_msix_teardown(dev, x); 3804 return error; 3805 } 3806 } 3807 ifp->if_cpuid = sc->jme_tx_cpuid; 3808 return 0; 3809 } 3810 3811 static void 3812 jme_msix_teardown(device_t dev, int msix_count) 3813 { 3814 struct jme_softc *sc = device_get_softc(dev); 3815 int x; 3816 3817 for (x = 0; x < msix_count; ++x) { 3818 struct jme_msix_data *msix = &sc->jme_msix[x]; 3819 3820 bus_teardown_intr(dev, msix->jme_msix_res, 3821 msix->jme_msix_handle); 3822 } 3823 } 3824 3825 static void 3826 jme_serialize_skipmain(struct jme_softc *sc) 3827 { 3828 lwkt_serialize_array_enter(sc->jme_serialize_arr, 3829 sc->jme_serialize_cnt, 1); 3830 } 3831 3832 static void 3833 jme_deserialize_skipmain(struct jme_softc *sc) 3834 { 3835 lwkt_serialize_array_exit(sc->jme_serialize_arr, 3836 sc->jme_serialize_cnt, 1); 3837 } 3838