/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */

#include "opt_polling.h"
#include "opt_rss.h"
#include "opt_jme.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *, int, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *, int);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *, int);
static int	jme_rxring_dma_alloc(struct jme_softc *, int);
static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);

static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = JME_NRXRING_DEF;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;
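/*
 * NOTE: the knobs below are boot-time tunables read from the kernel
 * environment (e.g. set in /boot/loader.conf) before attach runs; the
 * read-only sysctl nodes created in jme_sysctl_node() merely report
 * the values that were actually applied.
 */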
TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);

/*
 * Read a PHY register on the MII of the JMC250.
 */
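/*
 * NOTE: returning 0 for a non-matching PHY address (and on timeout)
 * makes the MII layer see those addresses as vacant while it probes
 * the bus, so only the PHY the chip reports gets attached.
 */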
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver has to reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note that just saving the values of
	 * JME_TXNDA and JME_RXNDA before stopping the MAC and
	 * restoring them afterwards is not sufficient to guarantee a
	 * correct MAC state, because stopping the MAC can take a while
	 * and the hardware may update JME_TXNDA/JME_RXNDA during the
	 * stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j;
	uint8_t eaddr[ETHER_ADDR_LEN];

	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
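	/*
	 * NOTE: the ring count ends up a power of two (1, 2 or 4)
	 * below; the RSS hash-to-ring dispatch presumably indexes
	 * rings with low-order hash bits, so other counts would not
	 * map evenly across the rings.
	 */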
	/*
	 * Calculate rx rings based on ncpus2
	 */
	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
	for (j = 0; j < sc->jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access would need different
	 * BARs to reach the registers, it's a waste of time to use
	 * I/O register space access.  JMC250 uses 16K to map the
	 * entire memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address, whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
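	/*
	 * NOTE: the Tx DMA burst size chosen below mirrors the PCIe
	 * Max Read Request Size from the device control register, the
	 * idea presumably being that the chip should never issue a
	 * read larger than the fabric will deliver in one request.
	 */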
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = JME_CSUM_FEATURES;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	char rx_ring_pkt[32];
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_tx_coal_to, "I",
			"jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_tx_coal_pkt, "I",
			"jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_rx_coal_to, "I",
			"jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_rx_coal_pkt, "I",
			"jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
		       0, "RX ring in use");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
		       0, "RSS debug level");
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
				rx_ring_pkt, CTLFLAG_RW,
				&sc->jme_rx_ring_pkt[r],
				0, "RXed packets");
	}
#endif	/* JME_RSS_DEBUG */

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descs was set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently).
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		    kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			    M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_TX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return error;
	}
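	/*
	 * Two parent DMA tags are used here: jme_ring_tag above
	 * parents the descriptor rings, which need the
	 * JME_RING_BOUNDARY constraint, while jme_buffer_tag below
	 * parents the Tx/Rx buffer tags and the shadow status block.
	 */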
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
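/*
 * Routing shutdown through jme_suspend() keeps reboot and suspend on
 * the same stop path (and, once the WOL code is enabled, the same WOL
 * programming path).
 */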
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note that we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation will
 * succeed, as we have no control after powering off.  If the
 * renegotiation fails, WOL may not work.  Running at 1Gbps draws more
 * power than the 375mA at 3.3V specified in the PCI specification,
 * which would result in power to the ethernet controller being shut
 * down completely.
 *
 * TODO
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
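/*
 * NOTE: jme_encap() either consumes the mbuf chain, loading it onto
 * the Tx ring, or frees it and NULLs *m_head on failure, so the
 * caller never owns the chain once the call returns.
 */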
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
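	/*
	 * NOTE: JME_TD_OWN on the first descriptor is set only below,
	 * after every following descriptor has been fully initialized,
	 * so the hardware can never pick up a partially built chain.
	 */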
	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}

static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy
		 * load, so cache the TXCSR value and write the ORed
		 * value with the kick command to TXCSR.  This saves
		 * one register access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
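/*
 * NOTE: ifp->if_timer armed in jme_start() drives jme_watchdog()
 * below; jme_txeof() disarms it once the Tx ring fully drains, so the
 * watchdog only fires when descriptors stay unreclaimed for
 * JME_TX_TIMEOUT seconds.
 */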
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but availability of
			 * Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
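/*
 * Legacy/MSI interrupt handler.  All interrupt sources are masked on
 * entry and re-enabled on the way out; a status of 0xFFFFFFFF is
 * treated as a read from hardware that is no longer present.
 */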
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
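	/*
	 * The COMP bits are cleared first and then set back only for
	 * the queues whose coalescing event actually fired, so the
	 * status write below acks exactly those queues' PCC events.
	 */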
*/ 1940 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); 1941 1942 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) 1943 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; 1944 1945 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 1946 if (status & jme_rx_status[r].jme_coal) { 1947 status |= jme_rx_status[r].jme_coal | 1948 jme_rx_status[r].jme_comp; 1949 } 1950 } 1951 1952 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 1953 1954 if (ifp->if_flags & IFF_RUNNING) { 1955 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 1956 jme_rx_intr(sc, status); 1957 1958 if (status & INTR_RXQ_DESC_EMPTY) { 1959 /* 1960 * Notify the hardware of the availability of new 1961 * Rx buffers. Reading RXCSR takes a very long time 1962 * under heavy load, so cache the RXCSR value and 1963 * write it back ORed with the kick command. This 1964 * saves one register access cycle. 1965 */ 1966 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 1967 RXCSR_RX_ENB | RXCSR_RXQ_START); 1968 } 1969 1970 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) { 1971 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize); 1972 jme_txeof(sc); 1973 if (!ifq_is_empty(&ifp->if_snd)) 1974 if_devstart(ifp); 1975 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize); 1976 } 1977 } 1978 back: 1979 /* Reenable interrupts. */ 1980 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 1981 } 1982 1983 static void 1984 jme_txeof(struct jme_softc *sc) 1985 { 1986 struct ifnet *ifp = &sc->arpcom.ac_if; 1987 struct jme_txdesc *txd; 1988 uint32_t status; 1989 int cons, nsegs; 1990 1991 cons = sc->jme_cdata.jme_tx_cons; 1992 if (cons == sc->jme_cdata.jme_tx_prod) 1993 return; 1994 1995 /* 1996 * Go through our Tx list and free mbufs for those 1997 * frames which have been transmitted. 1998 */ 1999 while (cons != sc->jme_cdata.jme_tx_prod) { 2000 txd = &sc->jme_cdata.jme_txdesc[cons]; 2001 KASSERT(txd->tx_m != NULL, 2002 ("%s: freeing NULL mbuf!\n", __func__)); 2003 2004 status = le32toh(txd->tx_desc->flags); 2005 if ((status & JME_TD_OWN) == JME_TD_OWN) 2006 break; 2007 2008 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) { 2009 ifp->if_oerrors++; 2010 } else { 2011 ifp->if_opackets++; 2012 if (status & JME_TD_COLLISION) { 2013 ifp->if_collisions += 2014 le32toh(txd->tx_desc->buflen) & 2015 JME_TD_BUF_LEN_MASK; 2016 } 2017 } 2018 2019 /* 2020 * Only the first descriptor of a multi-descriptor 2021 * transmission is updated, so the driver has to skip 2022 * the entire chain of buffers for the transmitted 2023 * frame. In other words, the JME_TD_OWN bit is valid 2024 * only in the first descriptor of a multi-descriptor 2025 * transmission. 2026 */ 2026 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 2027 sc->jme_cdata.jme_tx_ring[cons].flags = 0; 2028 JME_DESC_INC(cons, sc->jme_tx_desc_cnt); 2029 } 2030 2031 /* Reclaim transferred mbufs.
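For example, a hypothetical transmission with txd->tx_ndesc == 3 has JME_TD_OWN checked on its first descriptor only; the loop above then clears the flags of all three descriptors and advances the consumer index past them before the mbuf is unloaded and freed below.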
*/ 2032 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); 2033 m_freem(txd->tx_m); 2034 txd->tx_m = NULL; 2035 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc; 2036 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0, 2037 ("%s: Active Tx desc counter was garbled\n", __func__)); 2038 txd->tx_ndesc = 0; 2039 } 2040 sc->jme_cdata.jme_tx_cons = cons; 2041 2042 if (sc->jme_cdata.jme_tx_cnt == 0) 2043 ifp->if_timer = 0; 2044 2045 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <= 2046 sc->jme_tx_desc_cnt - JME_TXD_RSVD) 2047 ifp->if_flags &= ~IFF_OACTIVE; 2048 } 2049 2050 static __inline void 2051 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count) 2052 { 2053 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2054 int i; 2055 2056 for (i = 0; i < count; ++i) { 2057 struct jme_desc *desc = &rdata->jme_rx_ring[cons]; 2058 2059 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 2060 desc->buflen = htole32(MCLBYTES); 2061 JME_DESC_INC(cons, sc->jme_rx_desc_cnt); 2062 } 2063 } 2064 2065 static __inline struct pktinfo * 2066 jme_pktinfo(struct pktinfo *pi, uint32_t flags) 2067 { 2068 if (flags & JME_RD_IPV4) 2069 pi->pi_netisr = NETISR_IP; 2070 else if (flags & JME_RD_IPV6) 2071 pi->pi_netisr = NETISR_IPV6; 2072 else 2073 return NULL; 2074 2075 pi->pi_flags = 0; 2076 pi->pi_l3proto = IPPROTO_UNKNOWN; 2077 2078 if (flags & JME_RD_MORE_FRAG) 2079 pi->pi_flags |= PKTINFO_FLAG_FRAG; 2080 else if (flags & JME_RD_TCP) 2081 pi->pi_l3proto = IPPROTO_TCP; 2082 else if (flags & JME_RD_UDP) 2083 pi->pi_l3proto = IPPROTO_UDP; 2084 else 2085 pi = NULL; 2086 return pi; 2087 } 2088 2089 /* Receive a frame. */ 2090 static void 2091 jme_rxpkt(struct jme_softc *sc, int ring) 2092 { 2093 struct ifnet *ifp = &sc->arpcom.ac_if; 2094 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2095 struct jme_desc *desc; 2096 struct jme_rxdesc *rxd; 2097 struct mbuf *mp, *m; 2098 uint32_t flags, status, hash, hashinfo; 2099 int cons, count, nsegs; 2100 2101 cons = rdata->jme_rx_cons; 2102 desc = &rdata->jme_rx_ring[cons]; 2103 flags = le32toh(desc->flags); 2104 status = le32toh(desc->buflen); 2105 hash = le32toh(desc->addr_hi); 2106 hashinfo = le32toh(desc->addr_lo); 2107 nsegs = JME_RX_NSEGS(status); 2108 2109 JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, " 2110 "hash 0x%08x, hash info 0x%08x\n", 2111 ring, flags, hash, hashinfo); 2112 2113 if (status & JME_RX_ERR_STAT) { 2114 ifp->if_ierrors++; 2115 jme_discard_rxbufs(sc, ring, cons, nsegs); 2116 #ifdef JME_SHOW_ERRORS 2117 device_printf(sc->jme_dev, "%s: receive error = 0x%b\n", 2118 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); 2119 #endif 2120 rdata->jme_rx_cons += nsegs; 2121 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt; 2122 return; 2123 } 2124 2125 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 2126 for (count = 0; count < nsegs; count++, 2127 JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) { 2128 rxd = &rdata->jme_rxdesc[cons]; 2129 mp = rxd->rx_m; 2130 2131 /* Add a new receive buffer to the ring. */ 2132 if (jme_newbuf(sc, ring, rxd, 0) != 0) { 2133 ifp->if_iqdrops++; 2134 /* Reuse buffer. */ 2135 jme_discard_rxbufs(sc, ring, cons, nsegs - count); 2136 if (rdata->jme_rxhead != NULL) { 2137 m_freem(rdata->jme_rxhead); 2138 JME_RXCHAIN_RESET(sc, ring); 2139 } 2140 break; 2141 } 2142 2143 /* 2144 * Assume we've received a full-sized frame. 2145 * The actual size is fixed up when we encounter the 2146 * end of a multi-segment frame. 2147 */ 2148 mp->m_len = MCLBYTES; 2149 2150 /* Chain received mbufs.
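A worked example of the length fixup done below, assuming MCLBYTES is 2048 and JME_RX_PAD_BYTES is 10: a frame with jme_rxlen == 4500 arrives in nsegs == 3 clusters; the head mbuf then carries 2048 - 10 = 2038 bytes, the middle one 2048, and the tail 4500 - (2038 + 2048) = 414.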
*/ 2151 if (rdata->jme_rxhead == NULL) { 2152 rdata->jme_rxhead = mp; 2153 rdata->jme_rxtail = mp; 2154 } else { 2155 /* 2156 * The receive processor can receive a maximum frame 2157 * size of 65535 bytes. 2158 */ 2159 rdata->jme_rxtail->m_next = mp; 2160 rdata->jme_rxtail = mp; 2161 } 2162 2163 if (count == nsegs - 1) { 2164 struct pktinfo pi0, *pi; 2165 2166 /* Last desc. for this frame. */ 2167 m = rdata->jme_rxhead; 2168 m->m_pkthdr.len = rdata->jme_rxlen; 2169 if (nsegs > 1) { 2170 /* Set first mbuf size. */ 2171 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 2172 /* Set last mbuf size. */ 2173 mp->m_len = rdata->jme_rxlen - 2174 ((MCLBYTES - JME_RX_PAD_BYTES) + 2175 (MCLBYTES * (nsegs - 2))); 2176 } else { 2177 m->m_len = rdata->jme_rxlen; 2178 } 2179 m->m_pkthdr.rcvif = ifp; 2180 2181 /* 2182 * Account for the 10 bytes of auto padding that is 2183 * used to align the IP header on a 32-bit boundary. 2184 * Also note that the CRC bytes are automatically 2185 * removed by the hardware. 2186 */ 2187 m->m_data += JME_RX_PAD_BYTES; 2188 2189 /* Set checksum information. */ 2190 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2191 (flags & JME_RD_IPV4)) { 2192 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2193 if (flags & JME_RD_IPCSUM) 2194 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2195 if ((flags & JME_RD_MORE_FRAG) == 0 && 2196 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 2197 (JME_RD_TCP | JME_RD_TCPCSUM) || 2198 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 2199 (JME_RD_UDP | JME_RD_UDPCSUM))) { 2200 m->m_pkthdr.csum_flags |= 2201 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2202 m->m_pkthdr.csum_data = 0xffff; 2203 } 2204 } 2205 2206 /* Check for VLAN tagged packets. */ 2207 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) && 2208 (flags & JME_RD_VLAN_TAG)) { 2209 m->m_pkthdr.ether_vlantag = 2210 flags & JME_RD_VLAN_MASK; 2211 m->m_flags |= M_VLANTAG; 2212 } 2213 2214 ifp->if_ipackets++; 2215 2216 if (ifp->if_capenable & IFCAP_RSS) 2217 pi = jme_pktinfo(&pi0, flags); 2218 else 2219 pi = NULL; 2220 2221 if (pi != NULL && 2222 (hashinfo & JME_RD_HASH_FN_MASK) != 0) { 2223 m->m_flags |= M_HASH; 2224 m->m_pkthdr.hash = toeplitz_hash(hash); 2225 } 2226 2227 #ifdef JME_RSS_DEBUG 2228 if (pi != NULL) { 2229 JME_RSS_DPRINTF(sc, 10, 2230 "isr %d flags %08x, l3 %d %s\n", 2231 pi->pi_netisr, pi->pi_flags, 2232 pi->pi_l3proto, 2233 (m->m_flags & M_HASH) ? "hash" : ""); 2234 } 2235 #endif 2236 2237 /* Pass it on. */ 2238 ether_input_pkt(ifp, m, pi); 2239 2240 /* Reset mbuf chains. */ 2241 JME_RXCHAIN_RESET(sc, ring); 2242 #ifdef JME_RSS_DEBUG 2243 sc->jme_rx_ring_pkt[ring]++; 2244 #endif 2245 } 2246 } 2247 2248 rdata->jme_rx_cons += nsegs; 2249 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt; 2250 } 2251 2252 static void 2253 jme_rxeof(struct jme_softc *sc, int ring, int count) 2254 { 2255 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2256 struct jme_desc *desc; 2257 int nsegs, pktlen; 2258 2259 for (;;) { 2260 #ifdef DEVICE_POLLING 2261 if (count >= 0 && count-- == 0) 2262 break; 2263 #endif 2264 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons]; 2265 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2266 break; 2267 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2268 break; 2269 2270 /* 2271 * Check the number of segments against the received bytes. 2272 * A mismatch would indicate that the hardware is still 2273 * trying to update the Rx descriptors. I'm not 2274 * sure whether this check is needed.
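* For example, with 2048-byte clusters a frame reported as 4510 bytes * should occupy howmany(4510, MCLBYTES) == 3 segments; any other * count is taken to mean the descriptor has not been fully written.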
2275 */ 2276 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2277 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2278 if (nsegs != howmany(pktlen, MCLBYTES)) { 2279 if_printf(&sc->arpcom.ac_if, "RX fragment count (%d) " 2280 "and packet size (%d) mismatch\n", 2281 nsegs, pktlen); 2282 break; 2283 } 2284 2285 /* Received a frame. */ 2286 jme_rxpkt(sc, ring); 2287 } 2288 } 2289 2290 static void 2291 jme_tick(void *xsc) 2292 { 2293 struct jme_softc *sc = xsc; 2294 struct ifnet *ifp = &sc->arpcom.ac_if; 2295 struct mii_data *mii = device_get_softc(sc->jme_miibus); 2296 2297 ifnet_serialize_all(ifp); 2298 2299 mii_tick(mii); 2300 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2301 2302 ifnet_deserialize_all(ifp); 2303 } 2304 2305 static void 2306 jme_reset(struct jme_softc *sc) 2307 { 2308 uint32_t val; 2309 2310 /* Make sure that TX and RX are stopped */ 2311 jme_stop_tx(sc); 2312 jme_stop_rx(sc); 2313 2314 /* Start reset */ 2315 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2316 DELAY(20); 2317 2318 /* 2319 * Hold the reset bit before stopping the reset 2320 */ 2321 2322 /* Disable TXMAC and TXOFL clock sources */ 2323 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2324 /* Disable RXMAC clock source */ 2325 val = CSR_READ_4(sc, JME_GPREG1); 2326 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2327 /* Flush */ 2328 CSR_READ_4(sc, JME_GHC); 2329 2330 /* Stop reset */ 2331 CSR_WRITE_4(sc, JME_GHC, 0); 2332 /* Flush */ 2333 CSR_READ_4(sc, JME_GHC); 2334 2335 /* 2336 * Clear the reset bit after stopping the reset 2337 */ 2338 2339 /* Enable TXMAC and TXOFL clock sources */ 2340 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2341 /* Enable RXMAC clock source */ 2342 val = CSR_READ_4(sc, JME_GPREG1); 2343 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2344 /* Flush */ 2345 CSR_READ_4(sc, JME_GHC); 2346 2347 /* Disable TXMAC and TXOFL clock sources */ 2348 CSR_WRITE_4(sc, JME_GHC, 0); 2349 /* Disable RXMAC clock source */ 2350 val = CSR_READ_4(sc, JME_GPREG1); 2351 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC); 2352 /* Flush */ 2353 CSR_READ_4(sc, JME_GHC); 2354 2355 /* Enable TX and RX */ 2356 val = CSR_READ_4(sc, JME_TXCSR); 2357 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB); 2358 val = CSR_READ_4(sc, JME_RXCSR); 2359 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB); 2360 /* Flush */ 2361 CSR_READ_4(sc, JME_TXCSR); 2362 CSR_READ_4(sc, JME_RXCSR); 2363 2364 /* Enable TXMAC and TXOFL clock sources */ 2365 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC); 2366 /* Enable RXMAC clock source */ 2367 val = CSR_READ_4(sc, JME_GPREG1); 2368 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC); 2369 /* Flush */ 2370 CSR_READ_4(sc, JME_GHC); 2371 2372 /* Stop TX and RX */ 2373 jme_stop_tx(sc); 2374 jme_stop_rx(sc); 2375 } 2376 2377 static void 2378 jme_init(void *xsc) 2379 { 2380 struct jme_softc *sc = xsc; 2381 struct ifnet *ifp = &sc->arpcom.ac_if; 2382 struct mii_data *mii; 2383 uint8_t eaddr[ETHER_ADDR_LEN]; 2384 bus_addr_t paddr; 2385 uint32_t reg; 2386 int error, r; 2387 2388 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2389 2390 /* 2391 * Cancel any pending I/O. 2392 */ 2393 jme_stop(sc); 2394 2395 /* 2396 * Reset the chip to a known state. 2397
2397 */ 2398 jme_reset(sc); 2399 2400 /* 2401 * Setup MSI/MSI-X vectors to interrupts mapping 2402 */ 2403 jme_set_msinum(sc); 2404 2405 sc->jme_txd_spare = 2406 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES); 2407 KKASSERT(sc->jme_txd_spare >= 1); 2408 2409 /* 2410 * If we use 64bit address mode for transmitting, each Tx request 2411 * needs one more symbol descriptor. 2412 */ 2413 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) 2414 sc->jme_txd_spare += 1; 2415 2416 if (ifp->if_capenable & IFCAP_RSS) 2417 jme_enable_rss(sc); 2418 else 2419 jme_disable_rss(sc); 2420 2421 /* Init RX descriptors */ 2422 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2423 error = jme_init_rx_ring(sc, r); 2424 if (error) { 2425 if_printf(ifp, "initialization failed: " 2426 "no memory for %dth RX ring.\n", r); 2427 jme_stop(sc); 2428 return; 2429 } 2430 } 2431 2432 /* Init TX descriptors */ 2433 jme_init_tx_ring(sc); 2434 2435 /* Initialize shadow status block. */ 2436 jme_init_ssb(sc); 2437 2438 /* Reprogram the station address. */ 2439 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2440 CSR_WRITE_4(sc, JME_PAR0, 2441 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 2442 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 2443 2444 /* 2445 * Configure Tx queue. 2446 * Tx priority queue weight value : 0 2447 * Tx FIFO threshold for processing next packet : 16QW 2448 * Maximum Tx DMA length : 512 2449 * Allow Tx DMA burst. 2450 */ 2451 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2452 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2453 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2454 sc->jme_txcsr |= sc->jme_tx_dma_size; 2455 sc->jme_txcsr |= TXCSR_DMA_BURST; 2456 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2457 2458 /* Set Tx descriptor counter. */ 2459 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt); 2460 2461 /* Set Tx ring address to the hardware. */ 2462 paddr = sc->jme_cdata.jme_tx_ring_paddr; 2463 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2464 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2465 2466 /* Configure TxMAC parameters. */ 2467 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 2468 reg |= TXMAC_THRESH_1_PKT; 2469 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 2470 CSR_WRITE_4(sc, JME_TXMAC, reg); 2471 2472 /* 2473 * Configure Rx queue. 2474 * FIFO full threshold for transmitting Tx pause packet : 128T 2475 * FIFO threshold for processing next packet : 128QW 2476 * Rx queue 0 select 2477 * Max Rx DMA length : 128 2478 * Rx descriptor retry : 32 2479 * Rx descriptor retry time gap : 256ns 2480 * Don't receive runt/bad frame. 2481 */ 2482 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 2483 #if 0 2484 /* 2485 * Since Rx FIFO size is 4K bytes, receiving frames larger 2486 * than 4K bytes will suffer from Rx FIFO overruns. So 2487 * decrease FIFO threshold to reduce the FIFO overruns for 2488 * frames larger than 4000 bytes. 2489 * For best performance of standard MTU sized frames use 2490 * maximum allowable FIFO threshold, 128QW. 
2491 */ 2492 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) > 2493 JME_RX_FIFO_SIZE) 2494 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2495 else 2496 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 2497 #else 2498 /* Improve PCI Express compatibility */ 2499 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2500 #endif 2501 sc->jme_rxcsr |= sc->jme_rx_dma_size; 2502 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 2503 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 2504 /* XXX TODO DROP_BAD */ 2505 2506 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2507 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 2508 2509 /* Set Rx descriptor counter. */ 2510 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt); 2511 2512 /* Set Rx ring address to the hardware. */ 2513 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr; 2514 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2515 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2516 } 2517 2518 /* Clear receive filter. */ 2519 CSR_WRITE_4(sc, JME_RXMAC, 0); 2520 2521 /* Set up the receive filter. */ 2522 jme_set_filter(sc); 2523 jme_set_vlan(sc); 2524 2525 /* 2526 * Disable all WOL bits as WOL can interfere normal Rx 2527 * operation. Also clear WOL detection status bits. 2528 */ 2529 reg = CSR_READ_4(sc, JME_PMCS); 2530 reg &= ~PMCS_WOL_ENB_MASK; 2531 CSR_WRITE_4(sc, JME_PMCS, reg); 2532 2533 /* 2534 * Pad 10bytes right before received frame. This will greatly 2535 * help Rx performance on strict-alignment architectures as 2536 * it does not need to copy the frame to align the payload. 2537 */ 2538 reg = CSR_READ_4(sc, JME_RXMAC); 2539 reg |= RXMAC_PAD_10BYTES; 2540 2541 if (ifp->if_capenable & IFCAP_RXCSUM) 2542 reg |= RXMAC_CSUM_ENB; 2543 CSR_WRITE_4(sc, JME_RXMAC, reg); 2544 2545 /* Configure general purpose reg0 */ 2546 reg = CSR_READ_4(sc, JME_GPREG0); 2547 reg &= ~GPREG0_PCC_UNIT_MASK; 2548 /* Set PCC timer resolution to micro-seconds unit. */ 2549 reg |= GPREG0_PCC_UNIT_US; 2550 /* 2551 * Disable all shadow register posting as we have to read 2552 * JME_INTR_STATUS register in jme_intr. Also it seems 2553 * that it's hard to synchronize interrupt status between 2554 * hardware and software with shadow posting due to 2555 * requirements of bus_dmamap_sync(9). 2556 */ 2557 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2558 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2559 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2560 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2561 /* Disable posting of DW0. */ 2562 reg &= ~GPREG0_POST_DW0_ENB; 2563 /* Clear PME message. */ 2564 reg &= ~GPREG0_PME_ENB; 2565 /* Set PHY address. */ 2566 reg &= ~GPREG0_PHY_ADDR_MASK; 2567 reg |= sc->jme_phyaddr; 2568 CSR_WRITE_4(sc, JME_GPREG0, reg); 2569 2570 /* Configure Tx queue 0 packet completion coalescing. */ 2571 jme_set_tx_coal(sc); 2572 2573 /* Configure Rx queue 0 packet completion coalescing. */ 2574 jme_set_rx_coal(sc); 2575 2576 /* Configure shadow status block but don't enable posting. */ 2577 paddr = sc->jme_cdata.jme_ssb_block_paddr; 2578 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2579 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2580 2581 /* Disable Timer 1 and Timer 2. */ 2582 CSR_WRITE_4(sc, JME_TIMER1, 0); 2583 CSR_WRITE_4(sc, JME_TIMER2, 0); 2584 2585 /* Configure retry transmit period, retry limit value. 
*/ 2586 CSR_WRITE_4(sc, JME_TXTRHD, 2587 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2588 TXTRHD_RT_PERIOD_MASK) | 2589 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2590 TXTRHD_RT_LIMIT_MASK)); 2591 2592 #ifdef DEVICE_POLLING 2593 if (!(ifp->if_flags & IFF_POLLING)) 2594 #endif 2595 /* Initialize the interrupt mask. */ 2596 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2597 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2598 2599 /* 2600 * Enabling the Tx/Rx DMA engines and Rx queue processing is 2601 * done in jme_miibus_statchg after a valid link is detected. 2602 */ 2603 sc->jme_flags &= ~JME_FLAG_LINK; 2604 2605 /* Set the current media. */ 2606 mii = device_get_softc(sc->jme_miibus); 2607 mii_mediachg(mii); 2608 2609 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2610 2611 ifp->if_flags |= IFF_RUNNING; 2612 ifp->if_flags &= ~IFF_OACTIVE; 2613 } 2614 2615 static void 2616 jme_stop(struct jme_softc *sc) 2617 { 2618 struct ifnet *ifp = &sc->arpcom.ac_if; 2619 struct jme_txdesc *txd; 2620 struct jme_rxdesc *rxd; 2621 struct jme_rxdata *rdata; 2622 int i, r; 2623 2624 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2625 2626 /* 2627 * Mark the interface down and cancel the watchdog timer. 2628 */ 2629 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2630 ifp->if_timer = 0; 2631 2632 callout_stop(&sc->jme_tick_ch); 2633 sc->jme_flags &= ~JME_FLAG_LINK; 2634 2635 /* 2636 * Disable interrupts. 2637 */ 2638 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2639 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2640 2641 /* Disable updating shadow status block. */ 2642 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 2643 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 2644 2645 /* Stop receiver, transmitter. */ 2646 jme_stop_rx(sc); 2647 jme_stop_tx(sc); 2648 2649 /* 2650 * Free partially finished RX segments 2651 */ 2652 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2653 rdata = &sc->jme_cdata.jme_rx_data[r]; 2654 if (rdata->jme_rxhead != NULL) 2655 m_freem(rdata->jme_rxhead); 2656 JME_RXCHAIN_RESET(sc, r); 2657 } 2658 2659 /* 2660 * Free RX and TX mbufs still in the queues. 2661
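* Each DMA map is unloaded before its mbuf is freed, so no map is * left referencing freed memory when the rings are reinitialized * or torn down later.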
2661 */ 2662 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2663 rdata = &sc->jme_cdata.jme_rx_data[r]; 2664 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 2665 rxd = &rdata->jme_rxdesc[i]; 2666 if (rxd->rx_m != NULL) { 2667 bus_dmamap_unload(rdata->jme_rx_tag, 2668 rxd->rx_dmamap); 2669 m_freem(rxd->rx_m); 2670 rxd->rx_m = NULL; 2671 } 2672 } 2673 } 2674 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 2675 txd = &sc->jme_cdata.jme_txdesc[i]; 2676 if (txd->tx_m != NULL) { 2677 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, 2678 txd->tx_dmamap); 2679 m_freem(txd->tx_m); 2680 txd->tx_m = NULL; 2681 txd->tx_ndesc = 0; 2682 } 2683 } 2684 } 2685 2686 static void 2687 jme_stop_tx(struct jme_softc *sc) 2688 { 2689 uint32_t reg; 2690 int i; 2691 2692 reg = CSR_READ_4(sc, JME_TXCSR); 2693 if ((reg & TXCSR_TX_ENB) == 0) 2694 return; 2695 reg &= ~TXCSR_TX_ENB; 2696 CSR_WRITE_4(sc, JME_TXCSR, reg); 2697 for (i = JME_TIMEOUT; i > 0; i--) { 2698 DELAY(1); 2699 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2700 break; 2701 } 2702 if (i == 0) 2703 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2704 } 2705 2706 static void 2707 jme_stop_rx(struct jme_softc *sc) 2708 { 2709 uint32_t reg; 2710 int i; 2711 2712 reg = CSR_READ_4(sc, JME_RXCSR); 2713 if ((reg & RXCSR_RX_ENB) == 0) 2714 return; 2715 reg &= ~RXCSR_RX_ENB; 2716 CSR_WRITE_4(sc, JME_RXCSR, reg); 2717 for (i = JME_TIMEOUT; i > 0; i--) { 2718 DELAY(1); 2719 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 2720 break; 2721 } 2722 if (i == 0) 2723 device_printf(sc->jme_dev, "stopping recevier timeout!\n"); 2724 } 2725 2726 static void 2727 jme_init_tx_ring(struct jme_softc *sc) 2728 { 2729 struct jme_chain_data *cd; 2730 struct jme_txdesc *txd; 2731 int i; 2732 2733 sc->jme_cdata.jme_tx_prod = 0; 2734 sc->jme_cdata.jme_tx_cons = 0; 2735 sc->jme_cdata.jme_tx_cnt = 0; 2736 2737 cd = &sc->jme_cdata; 2738 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc)); 2739 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 2740 txd = &sc->jme_cdata.jme_txdesc[i]; 2741 txd->tx_m = NULL; 2742 txd->tx_desc = &cd->jme_tx_ring[i]; 2743 txd->tx_ndesc = 0; 2744 } 2745 } 2746 2747 static void 2748 jme_init_ssb(struct jme_softc *sc) 2749 { 2750 struct jme_chain_data *cd; 2751 2752 cd = &sc->jme_cdata; 2753 bzero(cd->jme_ssb_block, JME_SSB_SIZE); 2754 } 2755 2756 static int 2757 jme_init_rx_ring(struct jme_softc *sc, int ring) 2758 { 2759 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2760 struct jme_rxdesc *rxd; 2761 int i; 2762 2763 KKASSERT(rdata->jme_rxhead == NULL && 2764 rdata->jme_rxtail == NULL && 2765 rdata->jme_rxlen == 0); 2766 rdata->jme_rx_cons = 0; 2767 2768 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc)); 2769 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 2770 int error; 2771 2772 rxd = &rdata->jme_rxdesc[i]; 2773 rxd->rx_m = NULL; 2774 rxd->rx_desc = &rdata->jme_rx_ring[i]; 2775 error = jme_newbuf(sc, ring, rxd, 1); 2776 if (error) 2777 return error; 2778 } 2779 return 0; 2780 } 2781 2782 static int 2783 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init) 2784 { 2785 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2786 struct jme_desc *desc; 2787 struct mbuf *m; 2788 bus_dma_segment_t segs; 2789 bus_dmamap_t map; 2790 int error, nsegs; 2791 2792 m = m_getcl(init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2793 if (m == NULL) 2794 return ENOBUFS; 2795 /* 2796 * The JMC250 has a 64-bit boundary alignment limitation, so 2797 * jme(4) takes advantage of the hardware's 10-byte padding 2798 * feature in order not to copy the entire frame to align the 2799 * IP header on a 32-bit boundary. 2800 */ 2801 m->m_len = m->m_pkthdr.len = MCLBYTES; 2802 2803 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag, 2804 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs, 2805 BUS_DMA_NOWAIT); 2806 if (error) { 2807 m_freem(m); 2808 if (init) 2809 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 2810 return error; 2811 } 2812 2813 if (rxd->rx_m != NULL) { 2814 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap, 2815 BUS_DMASYNC_POSTREAD); 2816 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap); 2817 } 2818 map = rxd->rx_dmamap; 2819 rxd->rx_dmamap = rdata->jme_rx_sparemap; 2820 rdata->jme_rx_sparemap = map; 2821 rxd->rx_m = m; 2822 2823 desc = rxd->rx_desc; 2824 desc->buflen = htole32(segs.ds_len); 2825 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr)); 2826 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr)); 2827 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 2828 2829 return 0; 2830 } 2831 2832 static void 2833 jme_set_vlan(struct jme_softc *sc) 2834 { 2835 struct ifnet *ifp = &sc->arpcom.ac_if; 2836 uint32_t reg; 2837 2838 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2839 2840 reg = CSR_READ_4(sc, JME_RXMAC); 2841 reg &= ~RXMAC_VLAN_ENB; 2842 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 2843 reg |= RXMAC_VLAN_ENB; 2844 CSR_WRITE_4(sc, JME_RXMAC, reg); 2845 } 2846 2847 static void 2848 jme_set_filter(struct jme_softc *sc) 2849 { 2850 struct ifnet *ifp = &sc->arpcom.ac_if; 2851 struct ifmultiaddr *ifma; 2852 uint32_t crc; 2853 uint32_t mchash[2]; 2854 uint32_t rxcfg; 2855 2856 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2857 2858 rxcfg = CSR_READ_4(sc, JME_RXMAC); 2859 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 2860 RXMAC_ALLMULTI); 2861 2862 /* 2863 * Always accept frames destined to our station address. 2864 * Always accept broadcast frames. 2865 */ 2866 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 2867 2868 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 2869 if (ifp->if_flags & IFF_PROMISC) 2870 rxcfg |= RXMAC_PROMISC; 2871 if (ifp->if_flags & IFF_ALLMULTI) 2872 rxcfg |= RXMAC_ALLMULTI; 2873 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 2874 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 2875 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 2876 return; 2877 } 2878 2879 /* 2880 * Set up the multicast address filter by passing all multicast 2881 * addresses through a CRC generator, and then using the low-order 2882 * 6 bits as an index into the 64-bit multicast hash table. Of 2883 * those 6 bits, the high-order bit selects the register, while 2884 * the rest of the bits select the bit within the register. 2885 */ 2886 rxcfg |= RXMAC_MULTICAST; 2887 bzero(mchash, sizeof(mchash)); 2888 2889 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2890 if (ifma->ifma_addr->sa_family != AF_LINK) 2891 continue; 2892 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 2893 ifma->ifma_addr), ETHER_ADDR_LEN); 2894 2895 /* Just want the 6 least significant bits. */ 2896 crc &= 0x3f; 2897 2898 /* Set the corresponding bit in the hash table.
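For example, a CRC of 0x2a (binary 101010) sets bit 10 of mchash[1], i.e. MAR1: 0x2a >> 5 selects the register and 0x2a & 0x1f the bit within it.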
*/ 2899 mchash[crc >> 5] |= 1 << (crc & 0x1f); 2900 } 2901 2902 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 2903 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 2904 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 2905 } 2906 2907 static int 2908 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 2909 { 2910 struct jme_softc *sc = arg1; 2911 struct ifnet *ifp = &sc->arpcom.ac_if; 2912 int error, v; 2913 2914 ifnet_serialize_all(ifp); 2915 2916 v = sc->jme_tx_coal_to; 2917 error = sysctl_handle_int(oidp, &v, 0, req); 2918 if (error || req->newptr == NULL) 2919 goto back; 2920 2921 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 2922 error = EINVAL; 2923 goto back; 2924 } 2925 2926 if (v != sc->jme_tx_coal_to) { 2927 sc->jme_tx_coal_to = v; 2928 if (ifp->if_flags & IFF_RUNNING) 2929 jme_set_tx_coal(sc); 2930 } 2931 back: 2932 ifnet_deserialize_all(ifp); 2933 return error; 2934 } 2935 2936 static int 2937 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 2938 { 2939 struct jme_softc *sc = arg1; 2940 struct ifnet *ifp = &sc->arpcom.ac_if; 2941 int error, v; 2942 2943 ifnet_serialize_all(ifp); 2944 2945 v = sc->jme_tx_coal_pkt; 2946 error = sysctl_handle_int(oidp, &v, 0, req); 2947 if (error || req->newptr == NULL) 2948 goto back; 2949 2950 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 2951 error = EINVAL; 2952 goto back; 2953 } 2954 2955 if (v != sc->jme_tx_coal_pkt) { 2956 sc->jme_tx_coal_pkt = v; 2957 if (ifp->if_flags & IFF_RUNNING) 2958 jme_set_tx_coal(sc); 2959 } 2960 back: 2961 ifnet_deserialize_all(ifp); 2962 return error; 2963 } 2964 2965 static int 2966 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 2967 { 2968 struct jme_softc *sc = arg1; 2969 struct ifnet *ifp = &sc->arpcom.ac_if; 2970 int error, v; 2971 2972 ifnet_serialize_all(ifp); 2973 2974 v = sc->jme_rx_coal_to; 2975 error = sysctl_handle_int(oidp, &v, 0, req); 2976 if (error || req->newptr == NULL) 2977 goto back; 2978 2979 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 2980 error = EINVAL; 2981 goto back; 2982 } 2983 2984 if (v != sc->jme_rx_coal_to) { 2985 sc->jme_rx_coal_to = v; 2986 if (ifp->if_flags & IFF_RUNNING) 2987 jme_set_rx_coal(sc); 2988 } 2989 back: 2990 ifnet_deserialize_all(ifp); 2991 return error; 2992 } 2993 2994 static int 2995 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 2996 { 2997 struct jme_softc *sc = arg1; 2998 struct ifnet *ifp = &sc->arpcom.ac_if; 2999 int error, v; 3000 3001 ifnet_serialize_all(ifp); 3002 3003 v = sc->jme_rx_coal_pkt; 3004 error = sysctl_handle_int(oidp, &v, 0, req); 3005 if (error || req->newptr == NULL) 3006 goto back; 3007 3008 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) { 3009 error = EINVAL; 3010 goto back; 3011 } 3012 3013 if (v != sc->jme_rx_coal_pkt) { 3014 sc->jme_rx_coal_pkt = v; 3015 if (ifp->if_flags & IFF_RUNNING) 3016 jme_set_rx_coal(sc); 3017 } 3018 back: 3019 ifnet_deserialize_all(ifp); 3020 return error; 3021 } 3022 3023 static void 3024 jme_set_tx_coal(struct jme_softc *sc) 3025 { 3026 uint32_t reg; 3027 3028 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 3029 PCCTX_COAL_TO_MASK; 3030 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 3031 PCCTX_COAL_PKT_MASK; 3032 reg |= PCCTX_COAL_TXQ0; 3033 CSR_WRITE_4(sc, JME_PCCTX, reg); 3034 } 3035 3036 static void 3037 jme_set_rx_coal(struct jme_softc *sc) 3038 { 3039 uint32_t reg; 3040 int r; 3041 3042 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 3043 PCCRX_COAL_TO_MASK; 3044 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 3045 PCCRX_COAL_PKT_MASK; 3046 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 3047 if (r < 
sc->jme_rx_ring_inuse) 3048 CSR_WRITE_4(sc, JME_PCCRX(r), reg); 3049 else 3050 CSR_WRITE_4(sc, JME_PCCRX(r), 0); 3051 } 3052 } 3053 3054 #ifdef DEVICE_POLLING 3055 3056 static void 3057 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3058 { 3059 struct jme_softc *sc = ifp->if_softc; 3060 uint32_t status; 3061 int r; 3062 3063 ASSERT_SERIALIZED(&sc->jme_serialize); 3064 3065 switch (cmd) { 3066 case POLL_REGISTER: 3067 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 3068 break; 3069 3070 case POLL_DEREGISTER: 3071 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 3072 break; 3073 3074 case POLL_AND_CHECK_STATUS: 3075 case POLL_ONLY: 3076 status = CSR_READ_4(sc, JME_INTR_STATUS); 3077 3078 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 3079 struct jme_rxdata *rdata = 3080 &sc->jme_cdata.jme_rx_data[r]; 3081 3082 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3083 jme_rxeof(sc, r, count); 3084 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3085 } 3086 3087 if (status & INTR_RXQ_DESC_EMPTY) { 3088 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 3089 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 3090 RXCSR_RX_ENB | RXCSR_RXQ_START); 3091 } 3092 3093 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize); 3094 jme_txeof(sc); 3095 if (!ifq_is_empty(&ifp->if_snd)) 3096 if_devstart(ifp); 3097 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize); 3098 break; 3099 } 3100 } 3101 3102 #endif /* DEVICE_POLLING */ 3103 3104 static int 3105 jme_rxring_dma_alloc(struct jme_softc *sc, int ring) 3106 { 3107 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 3108 bus_dmamem_t dmem; 3109 int error; 3110 3111 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag, 3112 JME_RX_RING_ALIGN, 0, 3113 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3114 JME_RX_RING_SIZE(sc), 3115 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3116 if (error) { 3117 device_printf(sc->jme_dev, 3118 "could not allocate %dth Rx ring.\n", ring); 3119 return error; 3120 } 3121 rdata->jme_rx_ring_tag = dmem.dmem_tag; 3122 rdata->jme_rx_ring_map = dmem.dmem_map; 3123 rdata->jme_rx_ring = dmem.dmem_addr; 3124 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr; 3125 3126 return 0; 3127 } 3128 3129 static int 3130 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring) 3131 { 3132 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 3133 int i, error; 3134 3135 /* Create tag for Rx buffers. */ 3136 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 3137 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 3138 BUS_SPACE_MAXADDR, /* lowaddr */ 3139 BUS_SPACE_MAXADDR, /* highaddr */ 3140 NULL, NULL, /* filter, filterarg */ 3141 MCLBYTES, /* maxsize */ 3142 1, /* nsegments */ 3143 MCLBYTES, /* maxsegsize */ 3144 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */ 3145 &rdata->jme_rx_tag); 3146 if (error) { 3147 device_printf(sc->jme_dev, 3148 "could not create %dth Rx DMA tag.\n", ring); 3149 return error; 3150 } 3151 3152 /* Create DMA maps for Rx buffers. 
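One spare map is created per ring; jme_newbuf() loads a replacement mbuf into the spare map first and swaps maps only on success, so the currently mapped mbuf survives a failed replacement.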
*/ 3153 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3154 &rdata->jme_rx_sparemap); 3155 if (error) { 3156 device_printf(sc->jme_dev, 3157 "could not create %dth spare Rx dmamap.\n", ring); 3158 bus_dma_tag_destroy(rdata->jme_rx_tag); 3159 rdata->jme_rx_tag = NULL; 3160 return error; 3161 } 3162 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 3163 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i]; 3164 3165 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3166 &rxd->rx_dmamap); 3167 if (error) { 3168 int j; 3169 3170 device_printf(sc->jme_dev, 3171 "could not create %dth Rx dmamap " 3172 "for %dth RX ring.\n", i, ring); 3173 3174 for (j = 0; j < i; ++j) { 3175 rxd = &rdata->jme_rxdesc[j]; 3176 bus_dmamap_destroy(rdata->jme_rx_tag, 3177 rxd->rx_dmamap); 3178 } 3179 bus_dmamap_destroy(rdata->jme_rx_tag, 3180 rdata->jme_rx_sparemap); 3181 bus_dma_tag_destroy(rdata->jme_rx_tag); 3182 rdata->jme_rx_tag = NULL; 3183 return error; 3184 } 3185 } 3186 return 0; 3187 } 3188 3189 static void 3190 jme_rx_intr(struct jme_softc *sc, uint32_t status) 3191 { 3192 int r; 3193 3194 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 3195 if (status & jme_rx_status[r].jme_coal) { 3196 struct jme_rxdata *rdata = 3197 &sc->jme_cdata.jme_rx_data[r]; 3198 3199 lwkt_serialize_enter(&rdata->jme_rx_serialize); 3200 jme_rxeof(sc, r, -1); 3201 lwkt_serialize_exit(&rdata->jme_rx_serialize); 3202 } 3203 } 3204 } 3205 3206 static void 3207 jme_enable_rss(struct jme_softc *sc) 3208 { 3209 uint32_t rssc, ind; 3210 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE]; 3211 int i; 3212 3213 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt; 3214 3215 KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 || 3216 sc->jme_rx_ring_inuse == JME_NRXRING_4, 3217 ("%s: invalid # of RX rings (%d)\n", 3218 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse)); 3219 3220 rssc = RSSC_HASH_64_ENTRY; 3221 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP; 3222 rssc |= sc->jme_rx_ring_inuse >> 1; 3223 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc); 3224 CSR_WRITE_4(sc, JME_RSSC, rssc); 3225 3226 toeplitz_get_key(key, sizeof(key)); 3227 for (i = 0; i < RSSKEY_NREGS; ++i) { 3228 uint32_t keyreg; 3229 3230 keyreg = RSSKEY_REGVAL(key, i); 3231 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg); 3232 3233 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg); 3234 } 3235 3236 /* 3237 * Create the redirect table in the following fashion: 3238 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3239 */ 3240 ind = 0; 3241 for (i = 0; i < RSSTBL_REGSIZE; ++i) { 3242 int q; 3243 3244 q = i % sc->jme_rx_ring_inuse; 3245 ind |= q << (i * 8); 3246 } 3247 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind); 3248 3249 for (i = 0; i < RSSTBL_NREGS; ++i) 3250 CSR_WRITE_4(sc, RSSTBL_REG(i), ind); 3251 } 3252 3253 static void 3254 jme_disable_rss(struct jme_softc *sc) 3255 { 3256 sc->jme_rx_ring_inuse = JME_NRXRING_1; 3257 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 3258 } 3259 3260 static void 3261 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3262 { 3263 struct jme_softc *sc = ifp->if_softc; 3264 3265 switch (slz) { 3266 case IFNET_SERIALIZE_ALL: 3267 lwkt_serialize_array_enter(sc->jme_serialize_arr, 3268 sc->jme_serialize_cnt, 0); 3269 break; 3270 3271 case IFNET_SERIALIZE_MAIN: 3272 lwkt_serialize_enter(&sc->jme_serialize); 3273 break; 3274 3275 case IFNET_SERIALIZE_TX: 3276 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize); 3277 break; 3278 3279 case IFNET_SERIALIZE_RX(0): 3280 lwkt_serialize_enter( 3281 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize); 3282 break; 3283
3284 case IFNET_SERIALIZE_RX(1): 3285 lwkt_serialize_enter( 3286 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize); 3287 break; 3288 3289 case IFNET_SERIALIZE_RX(2): 3290 lwkt_serialize_enter( 3291 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize); 3292 break; 3293 3294 case IFNET_SERIALIZE_RX(3): 3295 lwkt_serialize_enter( 3296 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize); 3297 break; 3298 3299 default: 3300 panic("%s unsupported serialize type\n", ifp->if_xname); 3301 } 3302 } 3303 3304 static void 3305 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3306 { 3307 struct jme_softc *sc = ifp->if_softc; 3308 3309 switch (slz) { 3310 case IFNET_SERIALIZE_ALL: 3311 lwkt_serialize_array_exit(sc->jme_serialize_arr, 3312 sc->jme_serialize_cnt, 0); 3313 break; 3314 3315 case IFNET_SERIALIZE_MAIN: 3316 lwkt_serialize_exit(&sc->jme_serialize); 3317 break; 3318 3319 case IFNET_SERIALIZE_TX: 3320 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize); 3321 break; 3322 3323 case IFNET_SERIALIZE_RX(0): 3324 lwkt_serialize_exit( 3325 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize); 3326 break; 3327 3328 case IFNET_SERIALIZE_RX(1): 3329 lwkt_serialize_exit( 3330 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize); 3331 break; 3332 3333 case IFNET_SERIALIZE_RX(2): 3334 lwkt_serialize_exit( 3335 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize); 3336 break; 3337 3338 case IFNET_SERIALIZE_RX(3): 3339 lwkt_serialize_exit( 3340 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize); 3341 break; 3342 3343 default: 3344 panic("%s unsupported serialize type\n", ifp->if_xname); 3345 } 3346 } 3347 3348 static int 3349 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3350 { 3351 struct jme_softc *sc = ifp->if_softc; 3352 3353 switch (slz) { 3354 case IFNET_SERIALIZE_ALL: 3355 return lwkt_serialize_array_try(sc->jme_serialize_arr, 3356 sc->jme_serialize_cnt, 0); 3357 3358 case IFNET_SERIALIZE_MAIN: 3359 return lwkt_serialize_try(&sc->jme_serialize); 3360 3361 case IFNET_SERIALIZE_TX: 3362 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize); 3363 3364 case IFNET_SERIALIZE_RX(0): 3365 return lwkt_serialize_try( 3366 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize); 3367 3368 case IFNET_SERIALIZE_RX(1): 3369 return lwkt_serialize_try( 3370 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize); 3371 3372 case IFNET_SERIALIZE_RX(2): 3373 return lwkt_serialize_try( 3374 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize); 3375 3376 case IFNET_SERIALIZE_RX(3): 3377 return lwkt_serialize_try( 3378 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize); 3379 3380 default: 3381 panic("%s unsupported serialize type\n", ifp->if_xname); 3382 } 3383 } 3384 3385 #ifdef INVARIANTS 3386 3387 static void 3388 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3389 boolean_t serialized) 3390 { 3391 struct jme_softc *sc = ifp->if_softc; 3392 struct jme_rxdata *rdata; 3393 int i; 3394 3395 switch (slz) { 3396 case IFNET_SERIALIZE_ALL: 3397 if (serialized) { 3398 for (i = 0; i < sc->jme_serialize_cnt; ++i) 3399 ASSERT_SERIALIZED(sc->jme_serialize_arr[i]); 3400 } else { 3401 for (i = 0; i < sc->jme_serialize_cnt; ++i) 3402 ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]); 3403 } 3404 break; 3405 3406 case IFNET_SERIALIZE_MAIN: 3407 if (serialized) 3408 ASSERT_SERIALIZED(&sc->jme_serialize); 3409 else 3410 ASSERT_NOT_SERIALIZED(&sc->jme_serialize); 3411 break; 3412 3413 case IFNET_SERIALIZE_TX: 3414 if (serialized) 3415 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize); 3416 else 3417 
ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize); 3418 break; 3419 3420 case IFNET_SERIALIZE_RX(0): 3421 rdata = &sc->jme_cdata.jme_rx_data[0]; 3422 if (serialized) 3423 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3424 else 3425 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize); 3426 break; 3427 3428 case IFNET_SERIALIZE_RX(1): 3429 rdata = &sc->jme_cdata.jme_rx_data[1]; 3430 if (serialized) 3431 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3432 else 3433 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize); 3434 break; 3435 3436 case IFNET_SERIALIZE_RX(2): 3437 rdata = &sc->jme_cdata.jme_rx_data[2]; 3438 if (serialized) 3439 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3440 else 3441 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize); 3442 break; 3443 3444 case IFNET_SERIALIZE_RX(3): 3445 rdata = &sc->jme_cdata.jme_rx_data[3]; 3446 if (serialized) 3447 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3448 else 3449 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize); 3450 break; 3451 3452 default: 3453 panic("%s unsupported serialize type\n", ifp->if_xname); 3454 } 3455 } 3456 3457 #endif /* INVARIANTS */ 3458 3459 static void 3460 jme_msix_try_alloc(device_t dev) 3461 { 3462 struct jme_softc *sc = device_get_softc(dev); 3463 struct jme_msix_data *msix; 3464 int error, i, r, msix_enable, msix_count; 3465 char env[64]; 3466 3467 msix_count = 1 + sc->jme_rx_ring_cnt; 3468 KKASSERT(msix_count <= JME_NMSIX); 3469 3470 msix_enable = jme_msix_enable; 3471 ksnprintf(env, sizeof(env), "hw.%s.msix.enable", 3472 device_get_nameunit(dev)); 3473 kgetenv_int(env, &msix_enable); 3474 3475 /* 3476 * We leave the 1st MSI-X vector unused, so we 3477 * actually need msix_count + 1 MSI-X vectors. 3478 */ 3479 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1)) 3480 return; 3481 3482 for (i = 0; i < msix_count; ++i) 3483 sc->jme_msix[i].jme_msix_rid = -1; 3484 3485 i = 0; 3486 3487 msix = &sc->jme_msix[i++]; 3488 msix->jme_msix_cpuid = 0; /* XXX Put TX to cpu0 */ 3489 msix->jme_msix_arg = &sc->jme_cdata; 3490 msix->jme_msix_func = jme_msix_tx; 3491 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO; 3492 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize; 3493 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx", 3494 device_get_nameunit(dev)); 3495 3496 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 3497 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 3498 3499 msix = &sc->jme_msix[i++]; 3500 msix->jme_msix_cpuid = r; /* XXX Put RX to cpuX */ 3501 msix->jme_msix_arg = rdata; 3502 msix->jme_msix_func = jme_msix_rx; 3503 msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty; 3504 msix->jme_msix_serialize = &rdata->jme_rx_serialize; 3505 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), 3506 "%s rx%d", device_get_nameunit(dev), r); 3507 } 3508 3509 KKASSERT(i == msix_count); 3510 3511 error = pci_setup_msix(dev); 3512 if (error) 3513 return; 3514 3515 /* Set up jme_msix_cnt early, so we can clean up */ 3516 sc->jme_msix_cnt = msix_count; 3517 3518 for (i = 0; i < msix_count; ++i) { 3519 msix = &sc->jme_msix[i]; 3520 3521 msix->jme_msix_vector = i + 1; 3522 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector, 3523 &msix->jme_msix_rid, msix->jme_msix_cpuid); 3524 if (error) 3525 goto back; 3526 3527 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3528 &msix->jme_msix_rid, RF_ACTIVE); 3529 if (msix->jme_msix_res == NULL) { 3530 error = ENOMEM; 3531 goto back; 3532 } 3533 } 3534 3535 for (i = 0; i < JME_INTR_CNT; ++i) { 3536 uint32_t intr_mask
= (1 << i); 3537 int x; 3538 3539 if ((JME_INTRS & intr_mask) == 0) 3540 continue; 3541 3542 for (x = 0; x < msix_count; ++x) { 3543 msix = &sc->jme_msix[x]; 3544 if (msix->jme_msix_intrs & intr_mask) { 3545 int reg, shift; 3546 3547 reg = i / JME_MSINUM_FACTOR; 3548 KKASSERT(reg < JME_MSINUM_CNT); 3549 3550 shift = (i % JME_MSINUM_FACTOR) * 4; 3551 3552 sc->jme_msinum[reg] |= 3553 (msix->jme_msix_vector << shift); 3554 3555 break; 3556 } 3557 } 3558 } 3559 3560 if (bootverbose) { 3561 for (i = 0; i < JME_MSINUM_CNT; ++i) { 3562 device_printf(dev, "MSINUM%d: %#x\n", i, 3563 sc->jme_msinum[i]); 3564 } 3565 } 3566 3567 pci_enable_msix(dev); 3568 sc->jme_irq_type = PCI_INTR_TYPE_MSIX; 3569 3570 back: 3571 if (error) 3572 jme_msix_free(dev); 3573 } 3574 3575 static int 3576 jme_intr_alloc(device_t dev) 3577 { 3578 struct jme_softc *sc = device_get_softc(dev); 3579 u_int irq_flags; 3580 3581 jme_msix_try_alloc(dev); 3582 3583 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3584 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable, 3585 &sc->jme_irq_rid, &irq_flags); 3586 3587 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 3588 &sc->jme_irq_rid, irq_flags); 3589 if (sc->jme_irq_res == NULL) { 3590 device_printf(dev, "can't allocate irq\n"); 3591 return ENXIO; 3592 } 3593 } 3594 return 0; 3595 } 3596 3597 static void 3598 jme_msix_free(device_t dev) 3599 { 3600 struct jme_softc *sc = device_get_softc(dev); 3601 int i; 3602 3603 KKASSERT(sc->jme_msix_cnt > 1); 3604 3605 for (i = 0; i < sc->jme_msix_cnt; ++i) { 3606 struct jme_msix_data *msix = &sc->jme_msix[i]; 3607 3608 if (msix->jme_msix_res != NULL) { 3609 bus_release_resource(dev, SYS_RES_IRQ, 3610 msix->jme_msix_rid, msix->jme_msix_res); 3611 msix->jme_msix_res = NULL; 3612 } 3613 if (msix->jme_msix_rid >= 0) { 3614 pci_release_msix_vector(dev, msix->jme_msix_rid); 3615 msix->jme_msix_rid = -1; 3616 } 3617 } 3618 pci_teardown_msix(dev); 3619 } 3620 3621 static void 3622 jme_intr_free(device_t dev) 3623 { 3624 struct jme_softc *sc = device_get_softc(dev); 3625 3626 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) { 3627 if (sc->jme_irq_res != NULL) { 3628 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid, 3629 sc->jme_irq_res); 3630 } 3631 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI) 3632 pci_release_msi(dev); 3633 } else { 3634 jme_msix_free(dev); 3635 } 3636 } 3637 3638 static void 3639 jme_msix_tx(void *xcd) 3640 { 3641 struct jme_chain_data *cd = xcd; 3642 struct jme_softc *sc = cd->jme_sc; 3643 struct ifnet *ifp = &sc->arpcom.ac_if; 3644 3645 ASSERT_SERIALIZED(&cd->jme_tx_serialize); 3646 3647 CSR_WRITE_4(sc, JME_INTR_STATUS, 3648 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP); 3649 3650 if (ifp->if_flags & IFF_RUNNING) { 3651 jme_txeof(sc); 3652 if (!ifq_is_empty(&ifp->if_snd)) 3653 if_devstart(ifp); 3654 } 3655 } 3656 3657 static void 3658 jme_msix_rx(void *xrdata) 3659 { 3660 struct jme_rxdata *rdata = xrdata; 3661 struct jme_softc *sc = rdata->jme_sc; 3662 struct ifnet *ifp = &sc->arpcom.ac_if; 3663 uint32_t status; 3664 3665 ASSERT_SERIALIZED(&rdata->jme_rx_serialize); 3666 3667 status = CSR_READ_4(sc, JME_INTR_STATUS); 3668 status &= (rdata->jme_rx_coal | rdata->jme_rx_empty); 3669 3670 if (status & rdata->jme_rx_coal) { 3671 status |= (rdata->jme_rx_coal | rdata->jme_rx_comp); 3672 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 3673 } 3674 3675 if (ifp->if_flags & IFF_RUNNING) { 3676 if (status & rdata->jme_rx_coal) 3677 jme_rxeof(sc, rdata->jme_rx_idx, -1); 3678 3679 if (status & rdata->jme_rx_empty) { 3680 CSR_WRITE_4(sc, 
JME_RXCSR, sc->jme_rxcsr | 3681 RXCSR_RX_ENB | RXCSR_RXQ_START); 3682 } 3683 } 3684 } 3685 3686 static void 3687 jme_set_msinum(struct jme_softc *sc) 3688 { 3689 int i; 3690 3691 for (i = 0; i < JME_MSINUM_CNT; ++i) 3692 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]); 3693 } 3694 3695 static int 3696 jme_intr_setup(device_t dev) 3697 { 3698 struct jme_softc *sc = device_get_softc(dev); 3699 struct ifnet *ifp = &sc->arpcom.ac_if; 3700 int error; 3701 3702 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 3703 return jme_msix_setup(dev); 3704 3705 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, 3706 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize); 3707 if (error) { 3708 device_printf(dev, "could not set up interrupt handler.\n"); 3709 return error; 3710 } 3711 3712 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res); 3713 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 3714 return 0; 3715 } 3716 3717 static void 3718 jme_intr_teardown(device_t dev) 3719 { 3720 struct jme_softc *sc = device_get_softc(dev); 3721 3722 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) 3723 jme_msix_teardown(dev, sc->jme_msix_cnt); 3724 else 3725 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle); 3726 } 3727 3728 static int 3729 jme_msix_setup(device_t dev) 3730 { 3731 struct jme_softc *sc = device_get_softc(dev); 3732 struct ifnet *ifp = &sc->arpcom.ac_if; 3733 int x; 3734 3735 for (x = 0; x < sc->jme_msix_cnt; ++x) { 3736 struct jme_msix_data *msix = &sc->jme_msix[x]; 3737 int error; 3738 3739 error = bus_setup_intr_descr(dev, msix->jme_msix_res, 3740 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg, 3741 &msix->jme_msix_handle, msix->jme_msix_serialize, 3742 msix->jme_msix_desc); 3743 if (error) { 3744 device_printf(dev, "could not set up %s " 3745 "interrupt handler.\n", msix->jme_msix_desc); 3746 jme_msix_teardown(dev, x); 3747 return error; 3748 } 3749 } 3750 ifp->if_cpuid = 0; /* XXX */ 3751 return 0; 3752 } 3753 3754 static void 3755 jme_msix_teardown(device_t dev, int msix_count) 3756 { 3757 struct jme_softc *sc = device_get_softc(dev); 3758 int x; 3759 3760 for (x = 0; x < msix_count; ++x) { 3761 struct jme_msix_data *msix = &sc->jme_msix[x]; 3762 3763 bus_teardown_intr(dev, msix->jme_msix_res, 3764 msix->jme_msix_handle); 3765 } 3766 } 3767