1 /*- 2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $ 28 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $ 29 */ 30 31 #include "opt_polling.h" 32 33 #include <sys/param.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/interrupt.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/rman.h> 41 #include <sys/serialize.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <sys/sysctl.h> 45 46 #include <net/ethernet.h> 47 #include <net/if.h> 48 #include <net/bpf.h> 49 #include <net/if_arp.h> 50 #include <net/if_dl.h> 51 #include <net/if_media.h> 52 #include <net/ifq_var.h> 53 #include <net/vlan/if_vlan_var.h> 54 #include <net/vlan/if_vlan_ether.h> 55 56 #include <dev/netif/mii_layer/miivar.h> 57 #include <dev/netif/mii_layer/jmphyreg.h> 58 59 #include <bus/pci/pcireg.h> 60 #include <bus/pci/pcivar.h> 61 #include <bus/pci/pcidevs.h> 62 63 #include <dev/netif/jme/if_jmereg.h> 64 #include <dev/netif/jme/if_jmevar.h> 65 66 #include "miibus_if.h" 67 68 /* Define the following to disable printing Rx errors. */ 69 #undef JME_SHOW_ERRORS 70 71 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 72 73 #define JME_RSS_DEBUG 74 75 #ifdef JME_RSS_DEBUG 76 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \ 77 do { \ 78 if ((sc)->jme_rss_debug > (lvl)) \ 79 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \ 80 } while (0) 81 #else /* !JME_RSS_DEBUG */ 82 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 83 #endif /* JME_RSS_DEBUG */ 84 85 static int jme_probe(device_t); 86 static int jme_attach(device_t); 87 static int jme_detach(device_t); 88 static int jme_shutdown(device_t); 89 static int jme_suspend(device_t); 90 static int jme_resume(device_t); 91 92 static int jme_miibus_readreg(device_t, int, int); 93 static int jme_miibus_writereg(device_t, int, int, int); 94 static void jme_miibus_statchg(device_t); 95 96 static void jme_init(void *); 97 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 98 static void jme_start(struct ifnet *); 99 static void jme_watchdog(struct ifnet *); 100 static void jme_mediastatus(struct ifnet *, struct ifmediareq *); 101 static int jme_mediachange(struct ifnet *); 102 #ifdef DEVICE_POLLING 103 static void jme_poll(struct ifnet *, enum poll_cmd, int); 104 #endif 105 106 static void jme_intr(void *); 107 static void jme_txeof(struct jme_softc *); 108 static void jme_rxeof(struct jme_softc *, int); 109 static int jme_rxeof_chain(struct jme_softc *, int, 110 struct mbuf_chain *, int); 111 static void jme_rx_intr(struct jme_softc *, uint32_t); 112 113 static int jme_dma_alloc(struct jme_softc *); 114 static void jme_dma_free(struct jme_softc *); 115 static int jme_init_rx_ring(struct jme_softc *, int); 116 static void jme_init_tx_ring(struct jme_softc *); 117 static void jme_init_ssb(struct jme_softc *); 118 static int jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int); 119 static int jme_encap(struct jme_softc *, struct mbuf **); 120 static void jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *); 121 static int jme_rxring_dma_alloc(struct jme_softc *, int); 122 static int jme_rxbuf_dma_alloc(struct jme_softc *, int); 123 124 static void jme_tick(void *); 125 static void jme_stop(struct jme_softc *); 126 static void jme_reset(struct jme_softc *); 127 static void jme_set_vlan(struct jme_softc *); 128 static void jme_set_filter(struct jme_softc *); 129 static void jme_stop_tx(struct jme_softc *); 130 static void jme_stop_rx(struct jme_softc *); 131 static void jme_mac_config(struct jme_softc *); 132 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]); 133 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]); 134 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 135 #ifdef notyet 136 static void jme_setwol(struct jme_softc *); 137 static void jme_setlinkspeed(struct jme_softc *); 138 #endif 139 static void jme_set_tx_coal(struct jme_softc *); 140 static void jme_set_rx_coal(struct jme_softc *); 141 static void jme_enable_rss(struct jme_softc *); 142 static void jme_disable_rss(struct jme_softc *); 143 144 static void jme_sysctl_node(struct jme_softc *); 145 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS); 146 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS); 147 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS); 148 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS); 149 150 /* 151 * Devices supported by this driver. 152 */ 153 static const struct jme_dev { 154 uint16_t jme_vendorid; 155 uint16_t jme_deviceid; 156 uint32_t jme_caps; 157 const char *jme_name; 158 } jme_devs[] = { 159 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250, 160 JME_CAP_JUMBO, 161 "JMicron Inc, JMC250 Gigabit Ethernet" }, 162 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260, 163 JME_CAP_FASTETH, 164 "JMicron Inc, JMC260 Fast Ethernet" }, 165 { 0, 0, 0, NULL } 166 }; 167 168 static device_method_t jme_methods[] = { 169 /* Device interface. */ 170 DEVMETHOD(device_probe, jme_probe), 171 DEVMETHOD(device_attach, jme_attach), 172 DEVMETHOD(device_detach, jme_detach), 173 DEVMETHOD(device_shutdown, jme_shutdown), 174 DEVMETHOD(device_suspend, jme_suspend), 175 DEVMETHOD(device_resume, jme_resume), 176 177 /* Bus interface. */ 178 DEVMETHOD(bus_print_child, bus_generic_print_child), 179 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 180 181 /* MII interface. */ 182 DEVMETHOD(miibus_readreg, jme_miibus_readreg), 183 DEVMETHOD(miibus_writereg, jme_miibus_writereg), 184 DEVMETHOD(miibus_statchg, jme_miibus_statchg), 185 186 { NULL, NULL } 187 }; 188 189 static driver_t jme_driver = { 190 "jme", 191 jme_methods, 192 sizeof(struct jme_softc) 193 }; 194 195 static devclass_t jme_devclass; 196 197 DECLARE_DUMMY_MODULE(if_jme); 198 MODULE_DEPEND(if_jme, miibus, 1, 1, 1); 199 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0); 200 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0); 201 202 static const struct { 203 uint32_t jme_coal; 204 uint32_t jme_comp; 205 } jme_rx_status[JME_NRXRING_MAX] = { 206 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP }, 207 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP }, 208 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP }, 209 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP } 210 }; 211 212 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF; 213 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF; 214 static int jme_rx_ring_count = JME_NRXRING_DEF; 215 216 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count); 217 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count); 218 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count); 219 220 /* 221 * Read a PHY register on the MII of the JMC250. 222 */ 223 static int 224 jme_miibus_readreg(device_t dev, int phy, int reg) 225 { 226 struct jme_softc *sc = device_get_softc(dev); 227 uint32_t val; 228 int i; 229 230 /* For FPGA version, PHY address 0 should be ignored. */ 231 if (sc->jme_caps & JME_CAP_FPGA) { 232 if (phy == 0) 233 return (0); 234 } else { 235 if (sc->jme_phyaddr != phy) 236 return (0); 237 } 238 239 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE | 240 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 241 242 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 243 DELAY(1); 244 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 245 break; 246 } 247 if (i == 0) { 248 device_printf(sc->jme_dev, "phy read timeout: " 249 "phy %d, reg %d\n", phy, reg); 250 return (0); 251 } 252 253 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); 254 } 255 256 /* 257 * Write a PHY register on the MII of the JMC250. 258 */ 259 static int 260 jme_miibus_writereg(device_t dev, int phy, int reg, int val) 261 { 262 struct jme_softc *sc = device_get_softc(dev); 263 int i; 264 265 /* For FPGA version, PHY address 0 should be ignored. */ 266 if (sc->jme_caps & JME_CAP_FPGA) { 267 if (phy == 0) 268 return (0); 269 } else { 270 if (sc->jme_phyaddr != phy) 271 return (0); 272 } 273 274 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE | 275 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 276 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 277 278 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 279 DELAY(1); 280 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 281 break; 282 } 283 if (i == 0) { 284 device_printf(sc->jme_dev, "phy write timeout: " 285 "phy %d, reg %d\n", phy, reg); 286 } 287 288 return (0); 289 } 290 291 /* 292 * Callback from MII layer when media changes. 293 */ 294 static void 295 jme_miibus_statchg(device_t dev) 296 { 297 struct jme_softc *sc = device_get_softc(dev); 298 struct ifnet *ifp = &sc->arpcom.ac_if; 299 struct mii_data *mii; 300 struct jme_txdesc *txd; 301 bus_addr_t paddr; 302 int i, r; 303 304 ASSERT_SERIALIZED(ifp->if_serializer); 305 306 if ((ifp->if_flags & IFF_RUNNING) == 0) 307 return; 308 309 mii = device_get_softc(sc->jme_miibus); 310 311 sc->jme_flags &= ~JME_FLAG_LINK; 312 if ((mii->mii_media_status & IFM_AVALID) != 0) { 313 switch (IFM_SUBTYPE(mii->mii_media_active)) { 314 case IFM_10_T: 315 case IFM_100_TX: 316 sc->jme_flags |= JME_FLAG_LINK; 317 break; 318 case IFM_1000_T: 319 if (sc->jme_caps & JME_CAP_FASTETH) 320 break; 321 sc->jme_flags |= JME_FLAG_LINK; 322 break; 323 default: 324 break; 325 } 326 } 327 328 /* 329 * Disabling Rx/Tx MACs have a side-effect of resetting 330 * JME_TXNDA/JME_RXNDA register to the first address of 331 * Tx/Rx descriptor address. So driver should reset its 332 * internal procucer/consumer pointer and reclaim any 333 * allocated resources. Note, just saving the value of 334 * JME_TXNDA and JME_RXNDA registers before stopping MAC 335 * and restoring JME_TXNDA/JME_RXNDA register is not 336 * sufficient to make sure correct MAC state because 337 * stopping MAC operation can take a while and hardware 338 * might have updated JME_TXNDA/JME_RXNDA registers 339 * during the stop operation. 340 */ 341 342 /* Disable interrupts */ 343 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 344 345 /* Stop driver */ 346 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 347 ifp->if_timer = 0; 348 callout_stop(&sc->jme_tick_ch); 349 350 /* Stop receiver/transmitter. */ 351 jme_stop_rx(sc); 352 jme_stop_tx(sc); 353 354 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 355 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r]; 356 357 jme_rxeof(sc, r); 358 if (rdata->jme_rxhead != NULL) 359 m_freem(rdata->jme_rxhead); 360 JME_RXCHAIN_RESET(sc, r); 361 362 /* 363 * Reuse configured Rx descriptors and reset 364 * procuder/consumer index. 365 */ 366 rdata->jme_rx_cons = 0; 367 } 368 369 jme_txeof(sc); 370 if (sc->jme_cdata.jme_tx_cnt != 0) { 371 /* Remove queued packets for transmit. */ 372 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 373 txd = &sc->jme_cdata.jme_txdesc[i]; 374 if (txd->tx_m != NULL) { 375 bus_dmamap_unload( 376 sc->jme_cdata.jme_tx_tag, 377 txd->tx_dmamap); 378 m_freem(txd->tx_m); 379 txd->tx_m = NULL; 380 txd->tx_ndesc = 0; 381 ifp->if_oerrors++; 382 } 383 } 384 } 385 jme_init_tx_ring(sc); 386 387 /* Initialize shadow status block. */ 388 jme_init_ssb(sc); 389 390 /* Program MAC with resolved speed/duplex/flow-control. */ 391 if (sc->jme_flags & JME_FLAG_LINK) { 392 jme_mac_config(sc); 393 394 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 395 396 /* Set Tx ring address to the hardware. */ 397 paddr = sc->jme_cdata.jme_tx_ring_paddr; 398 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 399 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 400 401 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 402 CSR_WRITE_4(sc, JME_RXCSR, 403 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 404 405 /* Set Rx ring address to the hardware. */ 406 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr; 407 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 408 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 409 } 410 411 /* Restart receiver/transmitter. */ 412 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 413 RXCSR_RXQ_START); 414 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB); 415 } 416 417 ifp->if_flags |= IFF_RUNNING; 418 ifp->if_flags &= ~IFF_OACTIVE; 419 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 420 421 #ifdef DEVICE_POLLING 422 if (!(ifp->if_flags & IFF_POLLING)) 423 #endif 424 /* Reenable interrupts. */ 425 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 426 } 427 428 /* 429 * Get the current interface media status. 430 */ 431 static void 432 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 433 { 434 struct jme_softc *sc = ifp->if_softc; 435 struct mii_data *mii = device_get_softc(sc->jme_miibus); 436 437 ASSERT_SERIALIZED(ifp->if_serializer); 438 439 mii_pollstat(mii); 440 ifmr->ifm_status = mii->mii_media_status; 441 ifmr->ifm_active = mii->mii_media_active; 442 } 443 444 /* 445 * Set hardware to newly-selected media. 446 */ 447 static int 448 jme_mediachange(struct ifnet *ifp) 449 { 450 struct jme_softc *sc = ifp->if_softc; 451 struct mii_data *mii = device_get_softc(sc->jme_miibus); 452 int error; 453 454 ASSERT_SERIALIZED(ifp->if_serializer); 455 456 if (mii->mii_instance != 0) { 457 struct mii_softc *miisc; 458 459 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 460 mii_phy_reset(miisc); 461 } 462 error = mii_mediachg(mii); 463 464 return (error); 465 } 466 467 static int 468 jme_probe(device_t dev) 469 { 470 const struct jme_dev *sp; 471 uint16_t vid, did; 472 473 vid = pci_get_vendor(dev); 474 did = pci_get_device(dev); 475 for (sp = jme_devs; sp->jme_name != NULL; ++sp) { 476 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) { 477 struct jme_softc *sc = device_get_softc(dev); 478 479 sc->jme_caps = sp->jme_caps; 480 device_set_desc(dev, sp->jme_name); 481 return (0); 482 } 483 } 484 return (ENXIO); 485 } 486 487 static int 488 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) 489 { 490 uint32_t reg; 491 int i; 492 493 *val = 0; 494 for (i = JME_TIMEOUT; i > 0; i--) { 495 reg = CSR_READ_4(sc, JME_SMBCSR); 496 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) 497 break; 498 DELAY(1); 499 } 500 501 if (i == 0) { 502 device_printf(sc->jme_dev, "EEPROM idle timeout!\n"); 503 return (ETIMEDOUT); 504 } 505 506 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; 507 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); 508 for (i = JME_TIMEOUT; i > 0; i--) { 509 DELAY(1); 510 reg = CSR_READ_4(sc, JME_SMBINTF); 511 if ((reg & SMBINTF_CMD_TRIGGER) == 0) 512 break; 513 } 514 515 if (i == 0) { 516 device_printf(sc->jme_dev, "EEPROM read timeout!\n"); 517 return (ETIMEDOUT); 518 } 519 520 reg = CSR_READ_4(sc, JME_SMBINTF); 521 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; 522 523 return (0); 524 } 525 526 static int 527 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 528 { 529 uint8_t fup, reg, val; 530 uint32_t offset; 531 int match; 532 533 offset = 0; 534 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 535 fup != JME_EEPROM_SIG0) 536 return (ENOENT); 537 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 538 fup != JME_EEPROM_SIG1) 539 return (ENOENT); 540 match = 0; 541 do { 542 if (jme_eeprom_read_byte(sc, offset, &fup) != 0) 543 break; 544 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) == 545 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) { 546 if (jme_eeprom_read_byte(sc, offset + 1, ®) != 0) 547 break; 548 if (reg >= JME_PAR0 && 549 reg < JME_PAR0 + ETHER_ADDR_LEN) { 550 if (jme_eeprom_read_byte(sc, offset + 2, 551 &val) != 0) 552 break; 553 eaddr[reg - JME_PAR0] = val; 554 match++; 555 } 556 } 557 /* Check for the end of EEPROM descriptor. */ 558 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END) 559 break; 560 /* Try next eeprom descriptor. */ 561 offset += JME_EEPROM_DESC_BYTES; 562 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END); 563 564 if (match == ETHER_ADDR_LEN) 565 return (0); 566 567 return (ENOENT); 568 } 569 570 static void 571 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 572 { 573 uint32_t par0, par1; 574 575 /* Read station address. */ 576 par0 = CSR_READ_4(sc, JME_PAR0); 577 par1 = CSR_READ_4(sc, JME_PAR1); 578 par1 &= 0xFFFF; 579 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) { 580 device_printf(sc->jme_dev, 581 "generating fake ethernet address.\n"); 582 par0 = karc4random(); 583 /* Set OUI to JMicron. */ 584 eaddr[0] = 0x00; 585 eaddr[1] = 0x1B; 586 eaddr[2] = 0x8C; 587 eaddr[3] = (par0 >> 16) & 0xff; 588 eaddr[4] = (par0 >> 8) & 0xff; 589 eaddr[5] = par0 & 0xff; 590 } else { 591 eaddr[0] = (par0 >> 0) & 0xFF; 592 eaddr[1] = (par0 >> 8) & 0xFF; 593 eaddr[2] = (par0 >> 16) & 0xFF; 594 eaddr[3] = (par0 >> 24) & 0xFF; 595 eaddr[4] = (par1 >> 0) & 0xFF; 596 eaddr[5] = (par1 >> 8) & 0xFF; 597 } 598 } 599 600 static int 601 jme_attach(device_t dev) 602 { 603 struct jme_softc *sc = device_get_softc(dev); 604 struct ifnet *ifp = &sc->arpcom.ac_if; 605 uint32_t reg; 606 uint16_t did; 607 uint8_t pcie_ptr, rev; 608 int error = 0; 609 uint8_t eaddr[ETHER_ADDR_LEN]; 610 611 sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN); 612 if (sc->jme_rx_desc_cnt > JME_NDESC_MAX) 613 sc->jme_rx_desc_cnt = JME_NDESC_MAX; 614 615 sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN); 616 if (sc->jme_tx_desc_cnt > JME_NDESC_MAX) 617 sc->jme_tx_desc_cnt = JME_NDESC_MAX; 618 619 sc->jme_rx_ring_cnt = jme_rx_ring_count; 620 if (sc->jme_rx_ring_cnt <= 0) 621 sc->jme_rx_ring_cnt = JME_NRXRING_1; 622 if (sc->jme_rx_ring_cnt > ncpus2) 623 sc->jme_rx_ring_cnt = ncpus2; 624 625 if (sc->jme_rx_ring_cnt >= JME_NRXRING_4) 626 sc->jme_rx_ring_cnt = JME_NRXRING_4; 627 else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2) 628 sc->jme_rx_ring_cnt = JME_NRXRING_2; 629 630 if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN) { 631 sc->jme_caps |= JME_CAP_RSS; 632 sc->jme_flags |= JME_FLAG_RSS; 633 } 634 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt; 635 636 sc->jme_dev = dev; 637 sc->jme_lowaddr = BUS_SPACE_MAXADDR; 638 639 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 640 641 callout_init(&sc->jme_tick_ch); 642 643 #ifndef BURN_BRIDGES 644 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 645 uint32_t irq, mem; 646 647 irq = pci_read_config(dev, PCIR_INTLINE, 4); 648 mem = pci_read_config(dev, JME_PCIR_BAR, 4); 649 650 device_printf(dev, "chip is in D%d power mode " 651 "-- setting to D0\n", pci_get_powerstate(dev)); 652 653 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 654 655 pci_write_config(dev, PCIR_INTLINE, irq, 4); 656 pci_write_config(dev, JME_PCIR_BAR, mem, 4); 657 } 658 #endif /* !BURN_BRIDGE */ 659 660 /* Enable bus mastering */ 661 pci_enable_busmaster(dev); 662 663 /* 664 * Allocate IO memory 665 * 666 * JMC250 supports both memory mapped and I/O register space 667 * access. Because I/O register access should use different 668 * BARs to access registers it's waste of time to use I/O 669 * register spce access. JMC250 uses 16K to map entire memory 670 * space. 671 */ 672 sc->jme_mem_rid = JME_PCIR_BAR; 673 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 674 &sc->jme_mem_rid, RF_ACTIVE); 675 if (sc->jme_mem_res == NULL) { 676 device_printf(dev, "can't allocate IO memory\n"); 677 return ENXIO; 678 } 679 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res); 680 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res); 681 682 /* 683 * Allocate IRQ 684 */ 685 sc->jme_irq_rid = 0; 686 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 687 &sc->jme_irq_rid, 688 RF_SHAREABLE | RF_ACTIVE); 689 if (sc->jme_irq_res == NULL) { 690 device_printf(dev, "can't allocate irq\n"); 691 error = ENXIO; 692 goto fail; 693 } 694 695 /* 696 * Extract revisions 697 */ 698 reg = CSR_READ_4(sc, JME_CHIPMODE); 699 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != 700 CHIPMODE_NOT_FPGA) { 701 sc->jme_caps |= JME_CAP_FPGA; 702 if (bootverbose) { 703 device_printf(dev, "FPGA revision: 0x%04x\n", 704 (reg & CHIPMODE_FPGA_REV_MASK) >> 705 CHIPMODE_FPGA_REV_SHIFT); 706 } 707 } 708 709 /* NOTE: FM revision is put in the upper 4 bits */ 710 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4; 711 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT; 712 if (bootverbose) 713 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev); 714 715 did = pci_get_device(dev); 716 switch (did) { 717 case PCI_PRODUCT_JMICRON_JMC250: 718 if (rev == JME_REV1_A2) 719 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX; 720 break; 721 722 case PCI_PRODUCT_JMICRON_JMC260: 723 if (rev == JME_REV2) 724 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT; 725 break; 726 727 default: 728 panic("unknown device id 0x%04x\n", did); 729 } 730 if (rev >= JME_REV2) { 731 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC; 732 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 | 733 GHC_TXMAC_CLKSRC_1000; 734 } 735 736 /* Reset the ethernet controller. */ 737 jme_reset(sc); 738 739 /* Get station address. */ 740 reg = CSR_READ_4(sc, JME_SMBCSR); 741 if (reg & SMBCSR_EEPROM_PRESENT) 742 error = jme_eeprom_macaddr(sc, eaddr); 743 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) { 744 if (error != 0 && (bootverbose)) { 745 device_printf(dev, "ethernet hardware address " 746 "not found in EEPROM.\n"); 747 } 748 jme_reg_macaddr(sc, eaddr); 749 } 750 751 /* 752 * Save PHY address. 753 * Integrated JR0211 has fixed PHY address whereas FPGA version 754 * requires PHY probing to get correct PHY address. 755 */ 756 if ((sc->jme_caps & JME_CAP_FPGA) == 0) { 757 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) & 758 GPREG0_PHY_ADDR_MASK; 759 if (bootverbose) { 760 device_printf(dev, "PHY is at address %d.\n", 761 sc->jme_phyaddr); 762 } 763 } else { 764 sc->jme_phyaddr = 0; 765 } 766 767 /* Set max allowable DMA size. */ 768 pcie_ptr = pci_get_pciecap_ptr(dev); 769 if (pcie_ptr != 0) { 770 uint16_t ctrl; 771 772 sc->jme_caps |= JME_CAP_PCIE; 773 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2); 774 if (bootverbose) { 775 device_printf(dev, "Read request size : %d bytes.\n", 776 128 << ((ctrl >> 12) & 0x07)); 777 device_printf(dev, "TLP payload size : %d bytes.\n", 778 128 << ((ctrl >> 5) & 0x07)); 779 } 780 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) { 781 case PCIEM_DEVCTL_MAX_READRQ_128: 782 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128; 783 break; 784 case PCIEM_DEVCTL_MAX_READRQ_256: 785 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256; 786 break; 787 default: 788 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 789 break; 790 } 791 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 792 } else { 793 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 794 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 795 } 796 797 #ifdef notyet 798 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 799 sc->jme_caps |= JME_CAP_PMCAP; 800 #endif 801 802 /* 803 * Create sysctl tree 804 */ 805 jme_sysctl_node(sc); 806 807 /* Allocate DMA stuffs */ 808 error = jme_dma_alloc(sc); 809 if (error) 810 goto fail; 811 812 ifp->if_softc = sc; 813 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 814 ifp->if_init = jme_init; 815 ifp->if_ioctl = jme_ioctl; 816 ifp->if_start = jme_start; 817 #ifdef DEVICE_POLLING 818 ifp->if_poll = jme_poll; 819 #endif 820 ifp->if_watchdog = jme_watchdog; 821 ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD); 822 ifq_set_ready(&ifp->if_snd); 823 824 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */ 825 ifp->if_capabilities = IFCAP_HWCSUM | 826 IFCAP_VLAN_MTU | 827 IFCAP_VLAN_HWTAGGING; 828 ifp->if_hwassist = JME_CSUM_FEATURES; 829 ifp->if_capenable = ifp->if_capabilities; 830 831 /* Set up MII bus. */ 832 error = mii_phy_probe(dev, &sc->jme_miibus, 833 jme_mediachange, jme_mediastatus); 834 if (error) { 835 device_printf(dev, "no PHY found!\n"); 836 goto fail; 837 } 838 839 /* 840 * Save PHYADDR for FPGA mode PHY. 841 */ 842 if (sc->jme_caps & JME_CAP_FPGA) { 843 struct mii_data *mii = device_get_softc(sc->jme_miibus); 844 845 if (mii->mii_instance != 0) { 846 struct mii_softc *miisc; 847 848 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { 849 if (miisc->mii_phy != 0) { 850 sc->jme_phyaddr = miisc->mii_phy; 851 break; 852 } 853 } 854 if (sc->jme_phyaddr != 0) { 855 device_printf(sc->jme_dev, 856 "FPGA PHY is at %d\n", sc->jme_phyaddr); 857 /* vendor magic. */ 858 jme_miibus_writereg(dev, sc->jme_phyaddr, 859 JMPHY_CONF, JMPHY_CONF_DEFFIFO); 860 861 /* XXX should we clear JME_WA_EXTFIFO */ 862 } 863 } 864 } 865 866 ether_ifattach(ifp, eaddr, NULL); 867 868 /* Tell the upper layer(s) we support long frames. */ 869 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 870 871 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc, 872 &sc->jme_irq_handle, ifp->if_serializer); 873 if (error) { 874 device_printf(dev, "could not set up interrupt handler.\n"); 875 ether_ifdetach(ifp); 876 goto fail; 877 } 878 879 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res)); 880 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 881 return 0; 882 fail: 883 jme_detach(dev); 884 return (error); 885 } 886 887 static int 888 jme_detach(device_t dev) 889 { 890 struct jme_softc *sc = device_get_softc(dev); 891 892 if (device_is_attached(dev)) { 893 struct ifnet *ifp = &sc->arpcom.ac_if; 894 895 lwkt_serialize_enter(ifp->if_serializer); 896 jme_stop(sc); 897 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle); 898 lwkt_serialize_exit(ifp->if_serializer); 899 900 ether_ifdetach(ifp); 901 } 902 903 if (sc->jme_sysctl_tree != NULL) 904 sysctl_ctx_free(&sc->jme_sysctl_ctx); 905 906 if (sc->jme_miibus != NULL) 907 device_delete_child(dev, sc->jme_miibus); 908 bus_generic_detach(dev); 909 910 if (sc->jme_irq_res != NULL) { 911 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid, 912 sc->jme_irq_res); 913 } 914 915 if (sc->jme_mem_res != NULL) { 916 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid, 917 sc->jme_mem_res); 918 } 919 920 jme_dma_free(sc); 921 922 return (0); 923 } 924 925 static void 926 jme_sysctl_node(struct jme_softc *sc) 927 { 928 int coal_max; 929 #ifdef JME_RSS_DEBUG 930 char rx_ring_pkt[32]; 931 int r; 932 #endif 933 934 sysctl_ctx_init(&sc->jme_sysctl_ctx); 935 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx, 936 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 937 device_get_nameunit(sc->jme_dev), 938 CTLFLAG_RD, 0, ""); 939 if (sc->jme_sysctl_tree == NULL) { 940 device_printf(sc->jme_dev, "can't add sysctl node\n"); 941 return; 942 } 943 944 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 945 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 946 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, 947 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout"); 948 949 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 950 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 951 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, 952 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet"); 953 954 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 955 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 956 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, 957 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout"); 958 959 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx, 960 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 961 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, 962 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet"); 963 964 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 965 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 966 "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt, 967 0, "RX desc count"); 968 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 969 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 970 "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt, 971 0, "TX desc count"); 972 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 973 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 974 "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt, 975 0, "RX ring count"); 976 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 977 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 978 "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse, 979 0, "RX ring in use"); 980 #ifdef JME_RSS_DEBUG 981 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx, 982 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 983 "rss_debug", CTLFLAG_RD, &sc->jme_rss_debug, 984 0, "RSS debug level"); 985 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 986 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r); 987 SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx, 988 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO, 989 rx_ring_pkt, CTLFLAG_RD, 990 &sc->jme_rx_ring_pkt[r], 991 0, "RXed packets"); 992 } 993 #endif 994 995 /* 996 * Set default coalesce valves 997 */ 998 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 999 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 1000 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 1001 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 1002 1003 /* 1004 * Adjust coalesce valves, in case that the number of TX/RX 1005 * descs are set to small values by users. 1006 * 1007 * NOTE: coal_max will not be zero, since number of descs 1008 * must aligned by JME_NDESC_ALIGN (16 currently) 1009 */ 1010 coal_max = sc->jme_tx_desc_cnt / 6; 1011 if (coal_max < sc->jme_tx_coal_pkt) 1012 sc->jme_tx_coal_pkt = coal_max; 1013 1014 coal_max = sc->jme_rx_desc_cnt / 4; 1015 if (coal_max < sc->jme_rx_coal_pkt) 1016 sc->jme_rx_coal_pkt = coal_max; 1017 } 1018 1019 static int 1020 jme_dma_alloc(struct jme_softc *sc) 1021 { 1022 struct jme_txdesc *txd; 1023 bus_dmamem_t dmem; 1024 int error, i; 1025 1026 sc->jme_cdata.jme_txdesc = 1027 kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc), 1028 M_DEVBUF, M_WAITOK | M_ZERO); 1029 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) { 1030 sc->jme_cdata.jme_rx_data[i].jme_rxdesc = 1031 kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc), 1032 M_DEVBUF, M_WAITOK | M_ZERO); 1033 } 1034 1035 /* Create parent ring tag. */ 1036 error = bus_dma_tag_create(NULL,/* parent */ 1037 1, JME_RING_BOUNDARY, /* algnmnt, boundary */ 1038 sc->jme_lowaddr, /* lowaddr */ 1039 BUS_SPACE_MAXADDR, /* highaddr */ 1040 NULL, NULL, /* filter, filterarg */ 1041 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1042 0, /* nsegments */ 1043 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1044 0, /* flags */ 1045 &sc->jme_cdata.jme_ring_tag); 1046 if (error) { 1047 device_printf(sc->jme_dev, 1048 "could not create parent ring DMA tag.\n"); 1049 return error; 1050 } 1051 1052 /* 1053 * Create DMA stuffs for TX ring 1054 */ 1055 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag, 1056 JME_TX_RING_ALIGN, 0, 1057 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1058 JME_TX_RING_SIZE(sc), 1059 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1060 if (error) { 1061 device_printf(sc->jme_dev, "could not allocate Tx ring.\n"); 1062 return error; 1063 } 1064 sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag; 1065 sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map; 1066 sc->jme_cdata.jme_tx_ring = dmem.dmem_addr; 1067 sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr; 1068 1069 /* 1070 * Create DMA stuffs for RX rings 1071 */ 1072 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) { 1073 error = jme_rxring_dma_alloc(sc, i); 1074 if (error) 1075 return error; 1076 } 1077 1078 /* Create parent buffer tag. */ 1079 error = bus_dma_tag_create(NULL,/* parent */ 1080 1, 0, /* algnmnt, boundary */ 1081 sc->jme_lowaddr, /* lowaddr */ 1082 BUS_SPACE_MAXADDR, /* highaddr */ 1083 NULL, NULL, /* filter, filterarg */ 1084 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1085 0, /* nsegments */ 1086 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1087 0, /* flags */ 1088 &sc->jme_cdata.jme_buffer_tag); 1089 if (error) { 1090 device_printf(sc->jme_dev, 1091 "could not create parent buffer DMA tag.\n"); 1092 return error; 1093 } 1094 1095 /* 1096 * Create DMA stuffs for shadow status block 1097 */ 1098 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag, 1099 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1100 JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1101 if (error) { 1102 device_printf(sc->jme_dev, 1103 "could not create shadow status block.\n"); 1104 return error; 1105 } 1106 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag; 1107 sc->jme_cdata.jme_ssb_map = dmem.dmem_map; 1108 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr; 1109 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr; 1110 1111 /* 1112 * Create DMA stuffs for TX buffers 1113 */ 1114 1115 /* Create tag for Tx buffers. */ 1116 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1117 1, 0, /* algnmnt, boundary */ 1118 BUS_SPACE_MAXADDR, /* lowaddr */ 1119 BUS_SPACE_MAXADDR, /* highaddr */ 1120 NULL, NULL, /* filter, filterarg */ 1121 JME_JUMBO_FRAMELEN, /* maxsize */ 1122 JME_MAXTXSEGS, /* nsegments */ 1123 JME_MAXSEGSIZE, /* maxsegsize */ 1124 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */ 1125 &sc->jme_cdata.jme_tx_tag); 1126 if (error != 0) { 1127 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); 1128 return error; 1129 } 1130 1131 /* Create DMA maps for Tx buffers. */ 1132 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 1133 txd = &sc->jme_cdata.jme_txdesc[i]; 1134 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 1135 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1136 &txd->tx_dmamap); 1137 if (error) { 1138 int j; 1139 1140 device_printf(sc->jme_dev, 1141 "could not create %dth Tx dmamap.\n", i); 1142 1143 for (j = 0; j < i; ++j) { 1144 txd = &sc->jme_cdata.jme_txdesc[j]; 1145 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag, 1146 txd->tx_dmamap); 1147 } 1148 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag); 1149 sc->jme_cdata.jme_tx_tag = NULL; 1150 return error; 1151 } 1152 } 1153 1154 /* 1155 * Create DMA stuffs for RX buffers 1156 */ 1157 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) { 1158 error = jme_rxbuf_dma_alloc(sc, i); 1159 if (error) 1160 return error; 1161 } 1162 return 0; 1163 } 1164 1165 static void 1166 jme_dma_free(struct jme_softc *sc) 1167 { 1168 struct jme_txdesc *txd; 1169 struct jme_rxdesc *rxd; 1170 struct jme_rxdata *rdata; 1171 int i, r; 1172 1173 /* Tx ring */ 1174 if (sc->jme_cdata.jme_tx_ring_tag != NULL) { 1175 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag, 1176 sc->jme_cdata.jme_tx_ring_map); 1177 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag, 1178 sc->jme_cdata.jme_tx_ring, 1179 sc->jme_cdata.jme_tx_ring_map); 1180 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag); 1181 sc->jme_cdata.jme_tx_ring_tag = NULL; 1182 } 1183 1184 /* Rx ring */ 1185 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 1186 rdata = &sc->jme_cdata.jme_rx_data[r]; 1187 if (rdata->jme_rx_ring_tag != NULL) { 1188 bus_dmamap_unload(rdata->jme_rx_ring_tag, 1189 rdata->jme_rx_ring_map); 1190 bus_dmamem_free(rdata->jme_rx_ring_tag, 1191 rdata->jme_rx_ring, 1192 rdata->jme_rx_ring_map); 1193 bus_dma_tag_destroy(rdata->jme_rx_ring_tag); 1194 rdata->jme_rx_ring_tag = NULL; 1195 } 1196 } 1197 1198 /* Tx buffers */ 1199 if (sc->jme_cdata.jme_tx_tag != NULL) { 1200 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 1201 txd = &sc->jme_cdata.jme_txdesc[i]; 1202 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag, 1203 txd->tx_dmamap); 1204 } 1205 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag); 1206 sc->jme_cdata.jme_tx_tag = NULL; 1207 } 1208 1209 /* Rx buffers */ 1210 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 1211 rdata = &sc->jme_cdata.jme_rx_data[r]; 1212 if (rdata->jme_rx_tag != NULL) { 1213 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 1214 rxd = &rdata->jme_rxdesc[i]; 1215 bus_dmamap_destroy(rdata->jme_rx_tag, 1216 rxd->rx_dmamap); 1217 } 1218 bus_dmamap_destroy(rdata->jme_rx_tag, 1219 rdata->jme_rx_sparemap); 1220 bus_dma_tag_destroy(rdata->jme_rx_tag); 1221 rdata->jme_rx_tag = NULL; 1222 } 1223 } 1224 1225 /* Shadow status block. */ 1226 if (sc->jme_cdata.jme_ssb_tag != NULL) { 1227 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag, 1228 sc->jme_cdata.jme_ssb_map); 1229 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag, 1230 sc->jme_cdata.jme_ssb_block, 1231 sc->jme_cdata.jme_ssb_map); 1232 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); 1233 sc->jme_cdata.jme_ssb_tag = NULL; 1234 } 1235 1236 if (sc->jme_cdata.jme_buffer_tag != NULL) { 1237 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag); 1238 sc->jme_cdata.jme_buffer_tag = NULL; 1239 } 1240 if (sc->jme_cdata.jme_ring_tag != NULL) { 1241 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag); 1242 sc->jme_cdata.jme_ring_tag = NULL; 1243 } 1244 1245 if (sc->jme_cdata.jme_txdesc != NULL) { 1246 kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF); 1247 sc->jme_cdata.jme_txdesc = NULL; 1248 } 1249 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 1250 rdata = &sc->jme_cdata.jme_rx_data[r]; 1251 if (rdata->jme_rxdesc != NULL) { 1252 kfree(rdata->jme_rxdesc, M_DEVBUF); 1253 rdata->jme_rxdesc = NULL; 1254 } 1255 } 1256 } 1257 1258 /* 1259 * Make sure the interface is stopped at reboot time. 1260 */ 1261 static int 1262 jme_shutdown(device_t dev) 1263 { 1264 return jme_suspend(dev); 1265 } 1266 1267 #ifdef notyet 1268 /* 1269 * Unlike other ethernet controllers, JMC250 requires 1270 * explicit resetting link speed to 10/100Mbps as gigabit 1271 * link will cunsume more power than 375mA. 1272 * Note, we reset the link speed to 10/100Mbps with 1273 * auto-negotiation but we don't know whether that operation 1274 * would succeed or not as we have no control after powering 1275 * off. If the renegotiation fail WOL may not work. Running 1276 * at 1Gbps draws more power than 375mA at 3.3V which is 1277 * specified in PCI specification and that would result in 1278 * complete shutdowning power to ethernet controller. 1279 * 1280 * TODO 1281 * Save current negotiated media speed/duplex/flow-control 1282 * to softc and restore the same link again after resuming. 1283 * PHY handling such as power down/resetting to 100Mbps 1284 * may be better handled in suspend method in phy driver. 1285 */ 1286 static void 1287 jme_setlinkspeed(struct jme_softc *sc) 1288 { 1289 struct mii_data *mii; 1290 int aneg, i; 1291 1292 JME_LOCK_ASSERT(sc); 1293 1294 mii = device_get_softc(sc->jme_miibus); 1295 mii_pollstat(mii); 1296 aneg = 0; 1297 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1298 switch IFM_SUBTYPE(mii->mii_media_active) { 1299 case IFM_10_T: 1300 case IFM_100_TX: 1301 return; 1302 case IFM_1000_T: 1303 aneg++; 1304 default: 1305 break; 1306 } 1307 } 1308 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0); 1309 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR, 1310 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1311 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, 1312 BMCR_AUTOEN | BMCR_STARTNEG); 1313 DELAY(1000); 1314 if (aneg != 0) { 1315 /* Poll link state until jme(4) get a 10/100 link. */ 1316 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1317 mii_pollstat(mii); 1318 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1319 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1320 case IFM_10_T: 1321 case IFM_100_TX: 1322 jme_mac_config(sc); 1323 return; 1324 default: 1325 break; 1326 } 1327 } 1328 JME_UNLOCK(sc); 1329 pause("jmelnk", hz); 1330 JME_LOCK(sc); 1331 } 1332 if (i == MII_ANEGTICKS_GIGE) 1333 device_printf(sc->jme_dev, "establishing link failed, " 1334 "WOL may not work!"); 1335 } 1336 /* 1337 * No link, force MAC to have 100Mbps, full-duplex link. 1338 * This is the last resort and may/may not work. 1339 */ 1340 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1341 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1342 jme_mac_config(sc); 1343 } 1344 1345 static void 1346 jme_setwol(struct jme_softc *sc) 1347 { 1348 struct ifnet *ifp = &sc->arpcom.ac_if; 1349 uint32_t gpr, pmcs; 1350 uint16_t pmstat; 1351 int pmc; 1352 1353 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1354 /* No PME capability, PHY power down. */ 1355 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1356 MII_BMCR, BMCR_PDOWN); 1357 return; 1358 } 1359 1360 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1361 pmcs = CSR_READ_4(sc, JME_PMCS); 1362 pmcs &= ~PMCS_WOL_ENB_MASK; 1363 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1364 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1365 /* Enable PME message. */ 1366 gpr |= GPREG0_PME_ENB; 1367 /* For gigabit controllers, reset link speed to 10/100. */ 1368 if ((sc->jme_caps & JME_CAP_FASTETH) == 0) 1369 jme_setlinkspeed(sc); 1370 } 1371 1372 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1373 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1374 1375 /* Request PME. */ 1376 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); 1377 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1378 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1379 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1380 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1381 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1382 /* No WOL, PHY power down. */ 1383 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1384 MII_BMCR, BMCR_PDOWN); 1385 } 1386 } 1387 #endif 1388 1389 static int 1390 jme_suspend(device_t dev) 1391 { 1392 struct jme_softc *sc = device_get_softc(dev); 1393 struct ifnet *ifp = &sc->arpcom.ac_if; 1394 1395 lwkt_serialize_enter(ifp->if_serializer); 1396 jme_stop(sc); 1397 #ifdef notyet 1398 jme_setwol(sc); 1399 #endif 1400 lwkt_serialize_exit(ifp->if_serializer); 1401 1402 return (0); 1403 } 1404 1405 static int 1406 jme_resume(device_t dev) 1407 { 1408 struct jme_softc *sc = device_get_softc(dev); 1409 struct ifnet *ifp = &sc->arpcom.ac_if; 1410 #ifdef notyet 1411 int pmc; 1412 #endif 1413 1414 lwkt_serialize_enter(ifp->if_serializer); 1415 1416 #ifdef notyet 1417 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1418 uint16_t pmstat; 1419 1420 pmstat = pci_read_config(sc->jme_dev, 1421 pmc + PCIR_POWER_STATUS, 2); 1422 /* Disable PME clear PME status. */ 1423 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1424 pci_write_config(sc->jme_dev, 1425 pmc + PCIR_POWER_STATUS, pmstat, 2); 1426 } 1427 #endif 1428 1429 if (ifp->if_flags & IFF_UP) 1430 jme_init(sc); 1431 1432 lwkt_serialize_exit(ifp->if_serializer); 1433 1434 return (0); 1435 } 1436 1437 static int 1438 jme_encap(struct jme_softc *sc, struct mbuf **m_head) 1439 { 1440 struct jme_txdesc *txd; 1441 struct jme_desc *desc; 1442 struct mbuf *m; 1443 bus_dma_segment_t txsegs[JME_MAXTXSEGS]; 1444 int maxsegs, nsegs; 1445 int error, i, prod, symbol_desc; 1446 uint32_t cflags, flag64; 1447 1448 M_ASSERTPKTHDR((*m_head)); 1449 1450 prod = sc->jme_cdata.jme_tx_prod; 1451 txd = &sc->jme_cdata.jme_txdesc[prod]; 1452 1453 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) 1454 symbol_desc = 1; 1455 else 1456 symbol_desc = 0; 1457 1458 maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) - 1459 (JME_TXD_RSVD + symbol_desc); 1460 if (maxsegs > JME_MAXTXSEGS) 1461 maxsegs = JME_MAXTXSEGS; 1462 KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc), 1463 ("not enough segments %d\n", maxsegs)); 1464 1465 error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag, 1466 txd->tx_dmamap, m_head, 1467 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1468 if (error) 1469 goto fail; 1470 1471 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, 1472 BUS_DMASYNC_PREWRITE); 1473 1474 m = *m_head; 1475 cflags = 0; 1476 1477 /* Configure checksum offload. */ 1478 if (m->m_pkthdr.csum_flags & CSUM_IP) 1479 cflags |= JME_TD_IPCSUM; 1480 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1481 cflags |= JME_TD_TCPCSUM; 1482 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1483 cflags |= JME_TD_UDPCSUM; 1484 1485 /* Configure VLAN. */ 1486 if (m->m_flags & M_VLANTAG) { 1487 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK); 1488 cflags |= JME_TD_VLAN_TAG; 1489 } 1490 1491 desc = &sc->jme_cdata.jme_tx_ring[prod]; 1492 desc->flags = htole32(cflags); 1493 desc->addr_hi = htole32(m->m_pkthdr.len); 1494 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) { 1495 /* 1496 * Use 64bits TX desc chain format. 1497 * 1498 * The first TX desc of the chain, which is setup here, 1499 * is just a symbol TX desc carrying no payload. 1500 */ 1501 flag64 = JME_TD_64BIT; 1502 desc->buflen = 0; 1503 desc->addr_lo = 0; 1504 1505 /* No effective TX desc is consumed */ 1506 i = 0; 1507 } else { 1508 /* 1509 * Use 32bits TX desc chain format. 1510 * 1511 * The first TX desc of the chain, which is setup here, 1512 * is an effective TX desc carrying the first segment of 1513 * the mbuf chain. 1514 */ 1515 flag64 = 0; 1516 desc->buflen = htole32(txsegs[0].ds_len); 1517 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr)); 1518 1519 /* One effective TX desc is consumed */ 1520 i = 1; 1521 } 1522 sc->jme_cdata.jme_tx_cnt++; 1523 KKASSERT(sc->jme_cdata.jme_tx_cnt - i < 1524 sc->jme_tx_desc_cnt - JME_TXD_RSVD); 1525 JME_DESC_INC(prod, sc->jme_tx_desc_cnt); 1526 1527 txd->tx_ndesc = 1 - i; 1528 for (; i < nsegs; i++) { 1529 desc = &sc->jme_cdata.jme_tx_ring[prod]; 1530 desc->flags = htole32(JME_TD_OWN | flag64); 1531 desc->buflen = htole32(txsegs[i].ds_len); 1532 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr)); 1533 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr)); 1534 1535 sc->jme_cdata.jme_tx_cnt++; 1536 KKASSERT(sc->jme_cdata.jme_tx_cnt <= 1537 sc->jme_tx_desc_cnt - JME_TXD_RSVD); 1538 JME_DESC_INC(prod, sc->jme_tx_desc_cnt); 1539 } 1540 1541 /* Update producer index. */ 1542 sc->jme_cdata.jme_tx_prod = prod; 1543 /* 1544 * Finally request interrupt and give the first descriptor 1545 * owenership to hardware. 1546 */ 1547 desc = txd->tx_desc; 1548 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); 1549 1550 txd->tx_m = m; 1551 txd->tx_ndesc += nsegs; 1552 1553 /* Sync descriptors. */ 1554 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 1555 sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE); 1556 return 0; 1557 fail: 1558 m_freem(*m_head); 1559 *m_head = NULL; 1560 return error; 1561 } 1562 1563 static void 1564 jme_start(struct ifnet *ifp) 1565 { 1566 struct jme_softc *sc = ifp->if_softc; 1567 struct mbuf *m_head; 1568 int enq = 0; 1569 1570 ASSERT_SERIALIZED(ifp->if_serializer); 1571 1572 if ((sc->jme_flags & JME_FLAG_LINK) == 0) { 1573 ifq_purge(&ifp->if_snd); 1574 return; 1575 } 1576 1577 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1578 return; 1579 1580 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc)) 1581 jme_txeof(sc); 1582 1583 while (!ifq_is_empty(&ifp->if_snd)) { 1584 /* 1585 * Check number of available TX descs, always 1586 * leave JME_TXD_RSVD free TX descs. 1587 */ 1588 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare > 1589 sc->jme_tx_desc_cnt - JME_TXD_RSVD) { 1590 ifp->if_flags |= IFF_OACTIVE; 1591 break; 1592 } 1593 1594 m_head = ifq_dequeue(&ifp->if_snd, NULL); 1595 if (m_head == NULL) 1596 break; 1597 1598 /* 1599 * Pack the data into the transmit ring. If we 1600 * don't have room, set the OACTIVE flag and wait 1601 * for the NIC to drain the ring. 1602 */ 1603 if (jme_encap(sc, &m_head)) { 1604 KKASSERT(m_head == NULL); 1605 ifp->if_oerrors++; 1606 ifp->if_flags |= IFF_OACTIVE; 1607 break; 1608 } 1609 enq++; 1610 1611 /* 1612 * If there's a BPF listener, bounce a copy of this frame 1613 * to him. 1614 */ 1615 ETHER_BPF_MTAP(ifp, m_head); 1616 } 1617 1618 if (enq > 0) { 1619 /* 1620 * Reading TXCSR takes very long time under heavy load 1621 * so cache TXCSR value and writes the ORed value with 1622 * the kick command to the TXCSR. This saves one register 1623 * access cycle. 1624 */ 1625 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | 1626 TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1627 /* Set a timeout in case the chip goes out to lunch. */ 1628 ifp->if_timer = JME_TX_TIMEOUT; 1629 } 1630 } 1631 1632 static void 1633 jme_watchdog(struct ifnet *ifp) 1634 { 1635 struct jme_softc *sc = ifp->if_softc; 1636 1637 ASSERT_SERIALIZED(ifp->if_serializer); 1638 1639 if ((sc->jme_flags & JME_FLAG_LINK) == 0) { 1640 if_printf(ifp, "watchdog timeout (missed link)\n"); 1641 ifp->if_oerrors++; 1642 jme_init(sc); 1643 return; 1644 } 1645 1646 jme_txeof(sc); 1647 if (sc->jme_cdata.jme_tx_cnt == 0) { 1648 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 1649 "-- recovering\n"); 1650 if (!ifq_is_empty(&ifp->if_snd)) 1651 if_devstart(ifp); 1652 return; 1653 } 1654 1655 if_printf(ifp, "watchdog timeout\n"); 1656 ifp->if_oerrors++; 1657 jme_init(sc); 1658 if (!ifq_is_empty(&ifp->if_snd)) 1659 if_devstart(ifp); 1660 } 1661 1662 static int 1663 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1664 { 1665 struct jme_softc *sc = ifp->if_softc; 1666 struct mii_data *mii = device_get_softc(sc->jme_miibus); 1667 struct ifreq *ifr = (struct ifreq *)data; 1668 int error = 0, mask; 1669 1670 ASSERT_SERIALIZED(ifp->if_serializer); 1671 1672 switch (cmd) { 1673 case SIOCSIFMTU: 1674 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU || 1675 (!(sc->jme_caps & JME_CAP_JUMBO) && 1676 ifr->ifr_mtu > JME_MAX_MTU)) { 1677 error = EINVAL; 1678 break; 1679 } 1680 1681 if (ifp->if_mtu != ifr->ifr_mtu) { 1682 /* 1683 * No special configuration is required when interface 1684 * MTU is changed but availability of Tx checksum 1685 * offload should be chcked against new MTU size as 1686 * FIFO size is just 2K. 1687 */ 1688 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) { 1689 ifp->if_capenable &= ~IFCAP_TXCSUM; 1690 ifp->if_hwassist &= ~JME_CSUM_FEATURES; 1691 } 1692 ifp->if_mtu = ifr->ifr_mtu; 1693 if (ifp->if_flags & IFF_RUNNING) 1694 jme_init(sc); 1695 } 1696 break; 1697 1698 case SIOCSIFFLAGS: 1699 if (ifp->if_flags & IFF_UP) { 1700 if (ifp->if_flags & IFF_RUNNING) { 1701 if ((ifp->if_flags ^ sc->jme_if_flags) & 1702 (IFF_PROMISC | IFF_ALLMULTI)) 1703 jme_set_filter(sc); 1704 } else { 1705 jme_init(sc); 1706 } 1707 } else { 1708 if (ifp->if_flags & IFF_RUNNING) 1709 jme_stop(sc); 1710 } 1711 sc->jme_if_flags = ifp->if_flags; 1712 break; 1713 1714 case SIOCADDMULTI: 1715 case SIOCDELMULTI: 1716 if (ifp->if_flags & IFF_RUNNING) 1717 jme_set_filter(sc); 1718 break; 1719 1720 case SIOCSIFMEDIA: 1721 case SIOCGIFMEDIA: 1722 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1723 break; 1724 1725 case SIOCSIFCAP: 1726 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1727 1728 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) { 1729 if (IFCAP_TXCSUM & ifp->if_capabilities) { 1730 ifp->if_capenable ^= IFCAP_TXCSUM; 1731 if (IFCAP_TXCSUM & ifp->if_capenable) 1732 ifp->if_hwassist |= JME_CSUM_FEATURES; 1733 else 1734 ifp->if_hwassist &= ~JME_CSUM_FEATURES; 1735 } 1736 } 1737 if ((mask & IFCAP_RXCSUM) && 1738 (IFCAP_RXCSUM & ifp->if_capabilities)) { 1739 uint32_t reg; 1740 1741 ifp->if_capenable ^= IFCAP_RXCSUM; 1742 reg = CSR_READ_4(sc, JME_RXMAC); 1743 reg &= ~RXMAC_CSUM_ENB; 1744 if (ifp->if_capenable & IFCAP_RXCSUM) 1745 reg |= RXMAC_CSUM_ENB; 1746 CSR_WRITE_4(sc, JME_RXMAC, reg); 1747 } 1748 1749 if ((mask & IFCAP_VLAN_HWTAGGING) && 1750 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) { 1751 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1752 jme_set_vlan(sc); 1753 } 1754 break; 1755 1756 default: 1757 error = ether_ioctl(ifp, cmd, data); 1758 break; 1759 } 1760 return (error); 1761 } 1762 1763 static void 1764 jme_mac_config(struct jme_softc *sc) 1765 { 1766 struct mii_data *mii; 1767 uint32_t ghc, rxmac, txmac, txpause, gp1; 1768 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0; 1769 1770 mii = device_get_softc(sc->jme_miibus); 1771 1772 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 1773 DELAY(10); 1774 CSR_WRITE_4(sc, JME_GHC, 0); 1775 ghc = 0; 1776 rxmac = CSR_READ_4(sc, JME_RXMAC); 1777 rxmac &= ~RXMAC_FC_ENB; 1778 txmac = CSR_READ_4(sc, JME_TXMAC); 1779 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); 1780 txpause = CSR_READ_4(sc, JME_TXPFC); 1781 txpause &= ~TXPFC_PAUSE_ENB; 1782 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1783 ghc |= GHC_FULL_DUPLEX; 1784 rxmac &= ~RXMAC_COLL_DET_ENB; 1785 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | 1786 TXMAC_BACKOFF | TXMAC_CARRIER_EXT | 1787 TXMAC_FRAME_BURST); 1788 #ifdef notyet 1789 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1790 txpause |= TXPFC_PAUSE_ENB; 1791 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1792 rxmac |= RXMAC_FC_ENB; 1793 #endif 1794 /* Disable retry transmit timer/retry limit. */ 1795 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) & 1796 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); 1797 } else { 1798 rxmac |= RXMAC_COLL_DET_ENB; 1799 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; 1800 /* Enable retry transmit timer/retry limit. */ 1801 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) | 1802 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); 1803 } 1804 1805 /* 1806 * Reprogram Tx/Rx MACs with resolved speed/duplex. 1807 */ 1808 gp1 = CSR_READ_4(sc, JME_GPREG1); 1809 gp1 &= ~GPREG1_WA_HDX; 1810 1811 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) 1812 hdx = 1; 1813 1814 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1815 case IFM_10_T: 1816 ghc |= GHC_SPEED_10 | sc->jme_clksrc; 1817 if (hdx) 1818 gp1 |= GPREG1_WA_HDX; 1819 break; 1820 1821 case IFM_100_TX: 1822 ghc |= GHC_SPEED_100 | sc->jme_clksrc; 1823 if (hdx) 1824 gp1 |= GPREG1_WA_HDX; 1825 1826 /* 1827 * Use extended FIFO depth to workaround CRC errors 1828 * emitted by chips before JMC250B 1829 */ 1830 phyconf = JMPHY_CONF_EXTFIFO; 1831 break; 1832 1833 case IFM_1000_T: 1834 if (sc->jme_caps & JME_CAP_FASTETH) 1835 break; 1836 1837 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000; 1838 if (hdx) 1839 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; 1840 break; 1841 1842 default: 1843 break; 1844 } 1845 CSR_WRITE_4(sc, JME_GHC, ghc); 1846 CSR_WRITE_4(sc, JME_RXMAC, rxmac); 1847 CSR_WRITE_4(sc, JME_TXMAC, txmac); 1848 CSR_WRITE_4(sc, JME_TXPFC, txpause); 1849 1850 if (sc->jme_workaround & JME_WA_EXTFIFO) { 1851 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1852 JMPHY_CONF, phyconf); 1853 } 1854 if (sc->jme_workaround & JME_WA_HDX) 1855 CSR_WRITE_4(sc, JME_GPREG1, gp1); 1856 } 1857 1858 static void 1859 jme_intr(void *xsc) 1860 { 1861 struct jme_softc *sc = xsc; 1862 struct ifnet *ifp = &sc->arpcom.ac_if; 1863 uint32_t status; 1864 int r; 1865 1866 ASSERT_SERIALIZED(ifp->if_serializer); 1867 1868 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS); 1869 if (status == 0 || status == 0xFFFFFFFF) 1870 return; 1871 1872 /* Disable interrupts. */ 1873 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 1874 1875 status = CSR_READ_4(sc, JME_INTR_STATUS); 1876 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF) 1877 goto back; 1878 1879 /* Reset PCC counter/timer and Ack interrupts. */ 1880 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); 1881 1882 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) 1883 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; 1884 1885 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 1886 if (status & jme_rx_status[r].jme_coal) { 1887 status |= jme_rx_status[r].jme_coal | 1888 jme_rx_status[r].jme_comp; 1889 } 1890 } 1891 1892 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 1893 1894 if (ifp->if_flags & IFF_RUNNING) { 1895 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 1896 jme_rx_intr(sc, status); 1897 1898 if (status & INTR_RXQ_DESC_EMPTY) { 1899 /* 1900 * Notify hardware availability of new Rx buffers. 1901 * Reading RXCSR takes very long time under heavy 1902 * load so cache RXCSR value and writes the ORed 1903 * value with the kick command to the RXCSR. This 1904 * saves one register access cycle. 1905 */ 1906 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 1907 RXCSR_RX_ENB | RXCSR_RXQ_START); 1908 } 1909 1910 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) { 1911 jme_txeof(sc); 1912 if (!ifq_is_empty(&ifp->if_snd)) 1913 if_devstart(ifp); 1914 } 1915 } 1916 back: 1917 /* Reenable interrupts. */ 1918 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 1919 } 1920 1921 static void 1922 jme_txeof(struct jme_softc *sc) 1923 { 1924 struct ifnet *ifp = &sc->arpcom.ac_if; 1925 struct jme_txdesc *txd; 1926 uint32_t status; 1927 int cons, nsegs; 1928 1929 cons = sc->jme_cdata.jme_tx_cons; 1930 if (cons == sc->jme_cdata.jme_tx_prod) 1931 return; 1932 1933 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 1934 sc->jme_cdata.jme_tx_ring_map, 1935 BUS_DMASYNC_POSTREAD); 1936 1937 /* 1938 * Go through our Tx list and free mbufs for those 1939 * frames which have been transmitted. 1940 */ 1941 while (cons != sc->jme_cdata.jme_tx_prod) { 1942 txd = &sc->jme_cdata.jme_txdesc[cons]; 1943 KASSERT(txd->tx_m != NULL, 1944 ("%s: freeing NULL mbuf!\n", __func__)); 1945 1946 status = le32toh(txd->tx_desc->flags); 1947 if ((status & JME_TD_OWN) == JME_TD_OWN) 1948 break; 1949 1950 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) { 1951 ifp->if_oerrors++; 1952 } else { 1953 ifp->if_opackets++; 1954 if (status & JME_TD_COLLISION) { 1955 ifp->if_collisions += 1956 le32toh(txd->tx_desc->buflen) & 1957 JME_TD_BUF_LEN_MASK; 1958 } 1959 } 1960 1961 /* 1962 * Only the first descriptor of multi-descriptor 1963 * transmission is updated so driver have to skip entire 1964 * chained buffers for the transmiited frame. In other 1965 * words, JME_TD_OWN bit is valid only at the first 1966 * descriptor of a multi-descriptor transmission. 1967 */ 1968 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1969 sc->jme_cdata.jme_tx_ring[cons].flags = 0; 1970 JME_DESC_INC(cons, sc->jme_tx_desc_cnt); 1971 } 1972 1973 /* Reclaim transferred mbufs. */ 1974 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); 1975 m_freem(txd->tx_m); 1976 txd->tx_m = NULL; 1977 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc; 1978 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0, 1979 ("%s: Active Tx desc counter was garbled\n", __func__)); 1980 txd->tx_ndesc = 0; 1981 } 1982 sc->jme_cdata.jme_tx_cons = cons; 1983 1984 if (sc->jme_cdata.jme_tx_cnt == 0) 1985 ifp->if_timer = 0; 1986 1987 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <= 1988 sc->jme_tx_desc_cnt - JME_TXD_RSVD) 1989 ifp->if_flags &= ~IFF_OACTIVE; 1990 1991 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 1992 sc->jme_cdata.jme_tx_ring_map, 1993 BUS_DMASYNC_PREWRITE); 1994 } 1995 1996 static __inline void 1997 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count) 1998 { 1999 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2000 int i; 2001 2002 for (i = 0; i < count; ++i) { 2003 struct jme_desc *desc = &rdata->jme_rx_ring[cons]; 2004 2005 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 2006 desc->buflen = htole32(MCLBYTES); 2007 JME_DESC_INC(cons, sc->jme_rx_desc_cnt); 2008 } 2009 } 2010 2011 /* Receive a frame. */ 2012 static void 2013 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain) 2014 { 2015 struct ifnet *ifp = &sc->arpcom.ac_if; 2016 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2017 struct jme_desc *desc; 2018 struct jme_rxdesc *rxd; 2019 struct mbuf *mp, *m; 2020 uint32_t flags, status; 2021 int cons, count, nsegs; 2022 2023 cons = rdata->jme_rx_cons; 2024 desc = &rdata->jme_rx_ring[cons]; 2025 flags = le32toh(desc->flags); 2026 status = le32toh(desc->buflen); 2027 nsegs = JME_RX_NSEGS(status); 2028 2029 JME_RSS_DPRINTF(sc, 10, "ring%d, flags 0x%08x, " 2030 "hash 0x%08x, hash type 0x%08x\n", 2031 ring, flags, desc->addr_hi, desc->addr_lo); 2032 2033 if (status & JME_RX_ERR_STAT) { 2034 ifp->if_ierrors++; 2035 jme_discard_rxbufs(sc, ring, cons, nsegs); 2036 #ifdef JME_SHOW_ERRORS 2037 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n", 2038 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); 2039 #endif 2040 rdata->jme_rx_cons += nsegs; 2041 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt; 2042 return; 2043 } 2044 2045 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 2046 for (count = 0; count < nsegs; count++, 2047 JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) { 2048 rxd = &rdata->jme_rxdesc[cons]; 2049 mp = rxd->rx_m; 2050 2051 /* Add a new receive buffer to the ring. */ 2052 if (jme_newbuf(sc, ring, rxd, 0) != 0) { 2053 ifp->if_iqdrops++; 2054 /* Reuse buffer. */ 2055 jme_discard_rxbufs(sc, ring, cons, nsegs - count); 2056 if (rdata->jme_rxhead != NULL) { 2057 m_freem(rdata->jme_rxhead); 2058 JME_RXCHAIN_RESET(sc, ring); 2059 } 2060 break; 2061 } 2062 2063 /* 2064 * Assume we've received a full sized frame. 2065 * Actual size is fixed when we encounter the end of 2066 * multi-segmented frame. 2067 */ 2068 mp->m_len = MCLBYTES; 2069 2070 /* Chain received mbufs. */ 2071 if (rdata->jme_rxhead == NULL) { 2072 rdata->jme_rxhead = mp; 2073 rdata->jme_rxtail = mp; 2074 } else { 2075 /* 2076 * Receive processor can receive a maximum frame 2077 * size of 65535 bytes. 2078 */ 2079 mp->m_flags &= ~M_PKTHDR; 2080 rdata->jme_rxtail->m_next = mp; 2081 rdata->jme_rxtail = mp; 2082 } 2083 2084 if (count == nsegs - 1) { 2085 /* Last desc. for this frame. */ 2086 m = rdata->jme_rxhead; 2087 /* XXX assert PKTHDR? */ 2088 m->m_flags |= M_PKTHDR; 2089 m->m_pkthdr.len = rdata->jme_rxlen; 2090 if (nsegs > 1) { 2091 /* Set first mbuf size. */ 2092 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 2093 /* Set last mbuf size. */ 2094 mp->m_len = rdata->jme_rxlen - 2095 ((MCLBYTES - JME_RX_PAD_BYTES) + 2096 (MCLBYTES * (nsegs - 2))); 2097 } else { 2098 m->m_len = rdata->jme_rxlen; 2099 } 2100 m->m_pkthdr.rcvif = ifp; 2101 2102 /* 2103 * Account for 10bytes auto padding which is used 2104 * to align IP header on 32bit boundary. Also note, 2105 * CRC bytes is automatically removed by the 2106 * hardware. 2107 */ 2108 m->m_data += JME_RX_PAD_BYTES; 2109 2110 /* Set checksum information. */ 2111 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2112 (flags & JME_RD_IPV4)) { 2113 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2114 if (flags & JME_RD_IPCSUM) 2115 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2116 if ((flags & JME_RD_MORE_FRAG) == 0 && 2117 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 2118 (JME_RD_TCP | JME_RD_TCPCSUM) || 2119 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 2120 (JME_RD_UDP | JME_RD_UDPCSUM))) { 2121 m->m_pkthdr.csum_flags |= 2122 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2123 m->m_pkthdr.csum_data = 0xffff; 2124 } 2125 } 2126 2127 /* Check for VLAN tagged packets. */ 2128 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) && 2129 (flags & JME_RD_VLAN_TAG)) { 2130 m->m_pkthdr.ether_vlantag = 2131 flags & JME_RD_VLAN_MASK; 2132 m->m_flags |= M_VLANTAG; 2133 } 2134 2135 ifp->if_ipackets++; 2136 /* Pass it on. */ 2137 ether_input_chain(ifp, m, chain); 2138 2139 /* Reset mbuf chains. */ 2140 JME_RXCHAIN_RESET(sc, ring); 2141 #ifdef JME_RSS_DEBUG 2142 sc->jme_rx_ring_pkt[ring]++; 2143 #endif 2144 } 2145 } 2146 2147 rdata->jme_rx_cons += nsegs; 2148 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt; 2149 } 2150 2151 static int 2152 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain, 2153 int count) 2154 { 2155 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2156 struct jme_desc *desc; 2157 int nsegs, prog, pktlen; 2158 2159 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map, 2160 BUS_DMASYNC_POSTREAD); 2161 2162 prog = 0; 2163 for (;;) { 2164 #ifdef DEVICE_POLLING 2165 if (count >= 0 && count-- == 0) 2166 break; 2167 #endif 2168 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons]; 2169 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2170 break; 2171 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2172 break; 2173 2174 /* 2175 * Check number of segments against received bytes. 2176 * Non-matching value would indicate that hardware 2177 * is still trying to update Rx descriptors. I'm not 2178 * sure whether this check is needed. 2179 */ 2180 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2181 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2182 if (nsegs != howmany(pktlen, MCLBYTES)) { 2183 if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) " 2184 "and packet size(%d) mismach\n", 2185 nsegs, pktlen); 2186 break; 2187 } 2188 2189 /* Received a frame. */ 2190 jme_rxpkt(sc, ring, chain); 2191 prog++; 2192 } 2193 2194 if (prog > 0) { 2195 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map, 2196 BUS_DMASYNC_PREWRITE); 2197 } 2198 return prog; 2199 } 2200 2201 static void 2202 jme_rxeof(struct jme_softc *sc, int ring) 2203 { 2204 struct mbuf_chain chain[MAXCPU]; 2205 2206 ether_input_chain_init(chain); 2207 if (jme_rxeof_chain(sc, ring, chain, -1)) 2208 ether_input_dispatch(chain); 2209 } 2210 2211 static void 2212 jme_tick(void *xsc) 2213 { 2214 struct jme_softc *sc = xsc; 2215 struct ifnet *ifp = &sc->arpcom.ac_if; 2216 struct mii_data *mii = device_get_softc(sc->jme_miibus); 2217 2218 lwkt_serialize_enter(ifp->if_serializer); 2219 2220 mii_tick(mii); 2221 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2222 2223 lwkt_serialize_exit(ifp->if_serializer); 2224 } 2225 2226 static void 2227 jme_reset(struct jme_softc *sc) 2228 { 2229 #ifdef foo 2230 /* Stop receiver, transmitter. */ 2231 jme_stop_rx(sc); 2232 jme_stop_tx(sc); 2233 #endif 2234 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2235 DELAY(10); 2236 CSR_WRITE_4(sc, JME_GHC, 0); 2237 } 2238 2239 static void 2240 jme_init(void *xsc) 2241 { 2242 struct jme_softc *sc = xsc; 2243 struct ifnet *ifp = &sc->arpcom.ac_if; 2244 struct mii_data *mii; 2245 uint8_t eaddr[ETHER_ADDR_LEN]; 2246 bus_addr_t paddr; 2247 uint32_t reg; 2248 int error, r; 2249 2250 ASSERT_SERIALIZED(ifp->if_serializer); 2251 2252 /* 2253 * Cancel any pending I/O. 2254 */ 2255 jme_stop(sc); 2256 2257 /* 2258 * Reset the chip to a known state. 2259 */ 2260 jme_reset(sc); 2261 2262 sc->jme_txd_spare = 2263 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES); 2264 KKASSERT(sc->jme_txd_spare >= 1); 2265 2266 /* 2267 * If we use 64bit address mode for transmitting, each Tx request 2268 * needs one more symbol descriptor. 2269 */ 2270 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) 2271 sc->jme_txd_spare += 1; 2272 2273 if (sc->jme_flags & JME_FLAG_RSS) 2274 jme_enable_rss(sc); 2275 else 2276 jme_disable_rss(sc); 2277 2278 /* Init RX descriptors */ 2279 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2280 error = jme_init_rx_ring(sc, r); 2281 if (error) { 2282 if_printf(ifp, "initialization failed: " 2283 "no memory for %dth RX ring.\n", r); 2284 jme_stop(sc); 2285 return; 2286 } 2287 } 2288 2289 /* Init TX descriptors */ 2290 jme_init_tx_ring(sc); 2291 2292 /* Initialize shadow status block. */ 2293 jme_init_ssb(sc); 2294 2295 /* Reprogram the station address. */ 2296 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2297 CSR_WRITE_4(sc, JME_PAR0, 2298 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 2299 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 2300 2301 /* 2302 * Configure Tx queue. 2303 * Tx priority queue weight value : 0 2304 * Tx FIFO threshold for processing next packet : 16QW 2305 * Maximum Tx DMA length : 512 2306 * Allow Tx DMA burst. 2307 */ 2308 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2309 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2310 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2311 sc->jme_txcsr |= sc->jme_tx_dma_size; 2312 sc->jme_txcsr |= TXCSR_DMA_BURST; 2313 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2314 2315 /* Set Tx descriptor counter. */ 2316 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt); 2317 2318 /* Set Tx ring address to the hardware. */ 2319 paddr = sc->jme_cdata.jme_tx_ring_paddr; 2320 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2321 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2322 2323 /* Configure TxMAC parameters. */ 2324 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 2325 reg |= TXMAC_THRESH_1_PKT; 2326 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 2327 CSR_WRITE_4(sc, JME_TXMAC, reg); 2328 2329 /* 2330 * Configure Rx queue. 2331 * FIFO full threshold for transmitting Tx pause packet : 128T 2332 * FIFO threshold for processing next packet : 128QW 2333 * Rx queue 0 select 2334 * Max Rx DMA length : 128 2335 * Rx descriptor retry : 32 2336 * Rx descriptor retry time gap : 256ns 2337 * Don't receive runt/bad frame. 2338 */ 2339 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 2340 #if 0 2341 /* 2342 * Since Rx FIFO size is 4K bytes, receiving frames larger 2343 * than 4K bytes will suffer from Rx FIFO overruns. So 2344 * decrease FIFO threshold to reduce the FIFO overruns for 2345 * frames larger than 4000 bytes. 2346 * For best performance of standard MTU sized frames use 2347 * maximum allowable FIFO threshold, 128QW. 2348 */ 2349 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) > 2350 JME_RX_FIFO_SIZE) 2351 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2352 else 2353 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 2354 #else 2355 /* Improve PCI Express compatibility */ 2356 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 2357 #endif 2358 sc->jme_rxcsr |= sc->jme_rx_dma_size; 2359 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 2360 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 2361 /* XXX TODO DROP_BAD */ 2362 2363 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2364 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r)); 2365 2366 /* Set Rx descriptor counter. */ 2367 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt); 2368 2369 /* Set Rx ring address to the hardware. */ 2370 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr; 2371 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2372 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2373 } 2374 2375 /* Clear receive filter. */ 2376 CSR_WRITE_4(sc, JME_RXMAC, 0); 2377 2378 /* Set up the receive filter. */ 2379 jme_set_filter(sc); 2380 jme_set_vlan(sc); 2381 2382 /* 2383 * Disable all WOL bits as WOL can interfere normal Rx 2384 * operation. Also clear WOL detection status bits. 2385 */ 2386 reg = CSR_READ_4(sc, JME_PMCS); 2387 reg &= ~PMCS_WOL_ENB_MASK; 2388 CSR_WRITE_4(sc, JME_PMCS, reg); 2389 2390 /* 2391 * Pad 10bytes right before received frame. This will greatly 2392 * help Rx performance on strict-alignment architectures as 2393 * it does not need to copy the frame to align the payload. 2394 */ 2395 reg = CSR_READ_4(sc, JME_RXMAC); 2396 reg |= RXMAC_PAD_10BYTES; 2397 2398 if (ifp->if_capenable & IFCAP_RXCSUM) 2399 reg |= RXMAC_CSUM_ENB; 2400 CSR_WRITE_4(sc, JME_RXMAC, reg); 2401 2402 /* Configure general purpose reg0 */ 2403 reg = CSR_READ_4(sc, JME_GPREG0); 2404 reg &= ~GPREG0_PCC_UNIT_MASK; 2405 /* Set PCC timer resolution to micro-seconds unit. */ 2406 reg |= GPREG0_PCC_UNIT_US; 2407 /* 2408 * Disable all shadow register posting as we have to read 2409 * JME_INTR_STATUS register in jme_intr. Also it seems 2410 * that it's hard to synchronize interrupt status between 2411 * hardware and software with shadow posting due to 2412 * requirements of bus_dmamap_sync(9). 2413 */ 2414 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2415 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2416 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2417 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2418 /* Disable posting of DW0. */ 2419 reg &= ~GPREG0_POST_DW0_ENB; 2420 /* Clear PME message. */ 2421 reg &= ~GPREG0_PME_ENB; 2422 /* Set PHY address. */ 2423 reg &= ~GPREG0_PHY_ADDR_MASK; 2424 reg |= sc->jme_phyaddr; 2425 CSR_WRITE_4(sc, JME_GPREG0, reg); 2426 2427 /* Configure Tx queue 0 packet completion coalescing. */ 2428 jme_set_tx_coal(sc); 2429 2430 /* Configure Rx queue 0 packet completion coalescing. */ 2431 jme_set_rx_coal(sc); 2432 2433 /* Configure shadow status block but don't enable posting. */ 2434 paddr = sc->jme_cdata.jme_ssb_block_paddr; 2435 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2436 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2437 2438 /* Disable Timer 1 and Timer 2. */ 2439 CSR_WRITE_4(sc, JME_TIMER1, 0); 2440 CSR_WRITE_4(sc, JME_TIMER2, 0); 2441 2442 /* Configure retry transmit period, retry limit value. */ 2443 CSR_WRITE_4(sc, JME_TXTRHD, 2444 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2445 TXTRHD_RT_PERIOD_MASK) | 2446 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2447 TXTRHD_RT_LIMIT_SHIFT)); 2448 2449 #ifdef DEVICE_POLLING 2450 if (!(ifp->if_flags & IFF_POLLING)) 2451 #endif 2452 /* Initialize the interrupt mask. */ 2453 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2454 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2455 2456 /* 2457 * Enabling Tx/Rx DMA engines and Rx queue processing is 2458 * done after detection of valid link in jme_miibus_statchg. 2459 */ 2460 sc->jme_flags &= ~JME_FLAG_LINK; 2461 2462 /* Set the current media. */ 2463 mii = device_get_softc(sc->jme_miibus); 2464 mii_mediachg(mii); 2465 2466 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2467 2468 ifp->if_flags |= IFF_RUNNING; 2469 ifp->if_flags &= ~IFF_OACTIVE; 2470 } 2471 2472 static void 2473 jme_stop(struct jme_softc *sc) 2474 { 2475 struct ifnet *ifp = &sc->arpcom.ac_if; 2476 struct jme_txdesc *txd; 2477 struct jme_rxdesc *rxd; 2478 struct jme_rxdata *rdata; 2479 int i, r; 2480 2481 ASSERT_SERIALIZED(ifp->if_serializer); 2482 2483 /* 2484 * Mark the interface down and cancel the watchdog timer. 2485 */ 2486 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2487 ifp->if_timer = 0; 2488 2489 callout_stop(&sc->jme_tick_ch); 2490 sc->jme_flags &= ~JME_FLAG_LINK; 2491 2492 /* 2493 * Disable interrupts. 2494 */ 2495 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2496 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2497 2498 /* Disable updating shadow status block. */ 2499 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 2500 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 2501 2502 /* Stop receiver, transmitter. */ 2503 jme_stop_rx(sc); 2504 jme_stop_tx(sc); 2505 2506 /* 2507 * Free partial finished RX segments 2508 */ 2509 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2510 rdata = &sc->jme_cdata.jme_rx_data[r]; 2511 if (rdata->jme_rxhead != NULL) 2512 m_freem(rdata->jme_rxhead); 2513 JME_RXCHAIN_RESET(sc, r); 2514 } 2515 2516 /* 2517 * Free RX and TX mbufs still in the queues. 2518 */ 2519 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 2520 rdata = &sc->jme_cdata.jme_rx_data[r]; 2521 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 2522 rxd = &rdata->jme_rxdesc[i]; 2523 if (rxd->rx_m != NULL) { 2524 bus_dmamap_unload(rdata->jme_rx_tag, 2525 rxd->rx_dmamap); 2526 m_freem(rxd->rx_m); 2527 rxd->rx_m = NULL; 2528 } 2529 } 2530 } 2531 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 2532 txd = &sc->jme_cdata.jme_txdesc[i]; 2533 if (txd->tx_m != NULL) { 2534 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, 2535 txd->tx_dmamap); 2536 m_freem(txd->tx_m); 2537 txd->tx_m = NULL; 2538 txd->tx_ndesc = 0; 2539 } 2540 } 2541 } 2542 2543 static void 2544 jme_stop_tx(struct jme_softc *sc) 2545 { 2546 uint32_t reg; 2547 int i; 2548 2549 reg = CSR_READ_4(sc, JME_TXCSR); 2550 if ((reg & TXCSR_TX_ENB) == 0) 2551 return; 2552 reg &= ~TXCSR_TX_ENB; 2553 CSR_WRITE_4(sc, JME_TXCSR, reg); 2554 for (i = JME_TIMEOUT; i > 0; i--) { 2555 DELAY(1); 2556 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2557 break; 2558 } 2559 if (i == 0) 2560 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2561 } 2562 2563 static void 2564 jme_stop_rx(struct jme_softc *sc) 2565 { 2566 uint32_t reg; 2567 int i; 2568 2569 reg = CSR_READ_4(sc, JME_RXCSR); 2570 if ((reg & RXCSR_RX_ENB) == 0) 2571 return; 2572 reg &= ~RXCSR_RX_ENB; 2573 CSR_WRITE_4(sc, JME_RXCSR, reg); 2574 for (i = JME_TIMEOUT; i > 0; i--) { 2575 DELAY(1); 2576 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 2577 break; 2578 } 2579 if (i == 0) 2580 device_printf(sc->jme_dev, "stopping recevier timeout!\n"); 2581 } 2582 2583 static void 2584 jme_init_tx_ring(struct jme_softc *sc) 2585 { 2586 struct jme_chain_data *cd; 2587 struct jme_txdesc *txd; 2588 int i; 2589 2590 sc->jme_cdata.jme_tx_prod = 0; 2591 sc->jme_cdata.jme_tx_cons = 0; 2592 sc->jme_cdata.jme_tx_cnt = 0; 2593 2594 cd = &sc->jme_cdata; 2595 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc)); 2596 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 2597 txd = &sc->jme_cdata.jme_txdesc[i]; 2598 txd->tx_m = NULL; 2599 txd->tx_desc = &cd->jme_tx_ring[i]; 2600 txd->tx_ndesc = 0; 2601 } 2602 2603 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 2604 sc->jme_cdata.jme_tx_ring_map, 2605 BUS_DMASYNC_PREWRITE); 2606 } 2607 2608 static void 2609 jme_init_ssb(struct jme_softc *sc) 2610 { 2611 struct jme_chain_data *cd; 2612 2613 cd = &sc->jme_cdata; 2614 bzero(cd->jme_ssb_block, JME_SSB_SIZE); 2615 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map, 2616 BUS_DMASYNC_PREWRITE); 2617 } 2618 2619 static int 2620 jme_init_rx_ring(struct jme_softc *sc, int ring) 2621 { 2622 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2623 struct jme_rxdesc *rxd; 2624 int i; 2625 2626 KKASSERT(rdata->jme_rxhead == NULL && 2627 rdata->jme_rxtail == NULL && 2628 rdata->jme_rxlen == 0); 2629 rdata->jme_rx_cons = 0; 2630 2631 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc)); 2632 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 2633 int error; 2634 2635 rxd = &rdata->jme_rxdesc[i]; 2636 rxd->rx_m = NULL; 2637 rxd->rx_desc = &rdata->jme_rx_ring[i]; 2638 error = jme_newbuf(sc, ring, rxd, 1); 2639 if (error) 2640 return error; 2641 } 2642 2643 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map, 2644 BUS_DMASYNC_PREWRITE); 2645 return 0; 2646 } 2647 2648 static int 2649 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init) 2650 { 2651 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2652 struct jme_desc *desc; 2653 struct mbuf *m; 2654 bus_dma_segment_t segs; 2655 bus_dmamap_t map; 2656 int error, nsegs; 2657 2658 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2659 if (m == NULL) 2660 return ENOBUFS; 2661 /* 2662 * JMC250 has 64bit boundary alignment limitation so jme(4) 2663 * takes advantage of 10 bytes padding feature of hardware 2664 * in order not to copy entire frame to align IP header on 2665 * 32bit boundary. 2666 */ 2667 m->m_len = m->m_pkthdr.len = MCLBYTES; 2668 2669 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag, 2670 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs, 2671 BUS_DMA_NOWAIT); 2672 if (error) { 2673 m_freem(m); 2674 if (init) 2675 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 2676 return error; 2677 } 2678 2679 if (rxd->rx_m != NULL) { 2680 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap, 2681 BUS_DMASYNC_POSTREAD); 2682 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap); 2683 } 2684 map = rxd->rx_dmamap; 2685 rxd->rx_dmamap = rdata->jme_rx_sparemap; 2686 rdata->jme_rx_sparemap = map; 2687 rxd->rx_m = m; 2688 2689 desc = rxd->rx_desc; 2690 desc->buflen = htole32(segs.ds_len); 2691 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr)); 2692 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr)); 2693 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 2694 2695 return 0; 2696 } 2697 2698 static void 2699 jme_set_vlan(struct jme_softc *sc) 2700 { 2701 struct ifnet *ifp = &sc->arpcom.ac_if; 2702 uint32_t reg; 2703 2704 ASSERT_SERIALIZED(ifp->if_serializer); 2705 2706 reg = CSR_READ_4(sc, JME_RXMAC); 2707 reg &= ~RXMAC_VLAN_ENB; 2708 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 2709 reg |= RXMAC_VLAN_ENB; 2710 CSR_WRITE_4(sc, JME_RXMAC, reg); 2711 } 2712 2713 static void 2714 jme_set_filter(struct jme_softc *sc) 2715 { 2716 struct ifnet *ifp = &sc->arpcom.ac_if; 2717 struct ifmultiaddr *ifma; 2718 uint32_t crc; 2719 uint32_t mchash[2]; 2720 uint32_t rxcfg; 2721 2722 ASSERT_SERIALIZED(ifp->if_serializer); 2723 2724 rxcfg = CSR_READ_4(sc, JME_RXMAC); 2725 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 2726 RXMAC_ALLMULTI); 2727 2728 /* 2729 * Always accept frames destined to our station address. 2730 * Always accept broadcast frames. 2731 */ 2732 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST; 2733 2734 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 2735 if (ifp->if_flags & IFF_PROMISC) 2736 rxcfg |= RXMAC_PROMISC; 2737 if (ifp->if_flags & IFF_ALLMULTI) 2738 rxcfg |= RXMAC_ALLMULTI; 2739 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 2740 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 2741 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 2742 return; 2743 } 2744 2745 /* 2746 * Set up the multicast address filter by passing all multicast 2747 * addresses through a CRC generator, and then using the low-order 2748 * 6 bits as an index into the 64 bit multicast hash table. The 2749 * high order bits select the register, while the rest of the bits 2750 * select the bit within the register. 2751 */ 2752 rxcfg |= RXMAC_MULTICAST; 2753 bzero(mchash, sizeof(mchash)); 2754 2755 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2756 if (ifma->ifma_addr->sa_family != AF_LINK) 2757 continue; 2758 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 2759 ifma->ifma_addr), ETHER_ADDR_LEN); 2760 2761 /* Just want the 6 least significant bits. */ 2762 crc &= 0x3f; 2763 2764 /* Set the corresponding bit in the hash table. */ 2765 mchash[crc >> 5] |= 1 << (crc & 0x1f); 2766 } 2767 2768 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 2769 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 2770 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 2771 } 2772 2773 static int 2774 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS) 2775 { 2776 struct jme_softc *sc = arg1; 2777 struct ifnet *ifp = &sc->arpcom.ac_if; 2778 int error, v; 2779 2780 lwkt_serialize_enter(ifp->if_serializer); 2781 2782 v = sc->jme_tx_coal_to; 2783 error = sysctl_handle_int(oidp, &v, 0, req); 2784 if (error || req->newptr == NULL) 2785 goto back; 2786 2787 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) { 2788 error = EINVAL; 2789 goto back; 2790 } 2791 2792 if (v != sc->jme_tx_coal_to) { 2793 sc->jme_tx_coal_to = v; 2794 if (ifp->if_flags & IFF_RUNNING) 2795 jme_set_tx_coal(sc); 2796 } 2797 back: 2798 lwkt_serialize_exit(ifp->if_serializer); 2799 return error; 2800 } 2801 2802 static int 2803 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 2804 { 2805 struct jme_softc *sc = arg1; 2806 struct ifnet *ifp = &sc->arpcom.ac_if; 2807 int error, v; 2808 2809 lwkt_serialize_enter(ifp->if_serializer); 2810 2811 v = sc->jme_tx_coal_pkt; 2812 error = sysctl_handle_int(oidp, &v, 0, req); 2813 if (error || req->newptr == NULL) 2814 goto back; 2815 2816 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) { 2817 error = EINVAL; 2818 goto back; 2819 } 2820 2821 if (v != sc->jme_tx_coal_pkt) { 2822 sc->jme_tx_coal_pkt = v; 2823 if (ifp->if_flags & IFF_RUNNING) 2824 jme_set_tx_coal(sc); 2825 } 2826 back: 2827 lwkt_serialize_exit(ifp->if_serializer); 2828 return error; 2829 } 2830 2831 static int 2832 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS) 2833 { 2834 struct jme_softc *sc = arg1; 2835 struct ifnet *ifp = &sc->arpcom.ac_if; 2836 int error, v; 2837 2838 lwkt_serialize_enter(ifp->if_serializer); 2839 2840 v = sc->jme_rx_coal_to; 2841 error = sysctl_handle_int(oidp, &v, 0, req); 2842 if (error || req->newptr == NULL) 2843 goto back; 2844 2845 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) { 2846 error = EINVAL; 2847 goto back; 2848 } 2849 2850 if (v != sc->jme_rx_coal_to) { 2851 sc->jme_rx_coal_to = v; 2852 if (ifp->if_flags & IFF_RUNNING) 2853 jme_set_rx_coal(sc); 2854 } 2855 back: 2856 lwkt_serialize_exit(ifp->if_serializer); 2857 return error; 2858 } 2859 2860 static int 2861 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 2862 { 2863 struct jme_softc *sc = arg1; 2864 struct ifnet *ifp = &sc->arpcom.ac_if; 2865 int error, v; 2866 2867 lwkt_serialize_enter(ifp->if_serializer); 2868 2869 v = sc->jme_rx_coal_pkt; 2870 error = sysctl_handle_int(oidp, &v, 0, req); 2871 if (error || req->newptr == NULL) 2872 goto back; 2873 2874 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) { 2875 error = EINVAL; 2876 goto back; 2877 } 2878 2879 if (v != sc->jme_rx_coal_pkt) { 2880 sc->jme_rx_coal_pkt = v; 2881 if (ifp->if_flags & IFF_RUNNING) 2882 jme_set_rx_coal(sc); 2883 } 2884 back: 2885 lwkt_serialize_exit(ifp->if_serializer); 2886 return error; 2887 } 2888 2889 static void 2890 jme_set_tx_coal(struct jme_softc *sc) 2891 { 2892 uint32_t reg; 2893 2894 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 2895 PCCTX_COAL_TO_MASK; 2896 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 2897 PCCTX_COAL_PKT_MASK; 2898 reg |= PCCTX_COAL_TXQ0; 2899 CSR_WRITE_4(sc, JME_PCCTX, reg); 2900 } 2901 2902 static void 2903 jme_set_rx_coal(struct jme_softc *sc) 2904 { 2905 uint32_t reg; 2906 int r; 2907 2908 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 2909 PCCRX_COAL_TO_MASK; 2910 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 2911 PCCRX_COAL_PKT_MASK; 2912 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) { 2913 if (r < sc->jme_rx_ring_inuse) 2914 CSR_WRITE_4(sc, JME_PCCRX(r), reg); 2915 else 2916 CSR_WRITE_4(sc, JME_PCCRX(r), 0); 2917 } 2918 } 2919 2920 #ifdef DEVICE_POLLING 2921 2922 static void 2923 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2924 { 2925 struct jme_softc *sc = ifp->if_softc; 2926 struct mbuf_chain chain[MAXCPU]; 2927 uint32_t status; 2928 int r, prog = 0; 2929 2930 ASSERT_SERIALIZED(ifp->if_serializer); 2931 2932 switch (cmd) { 2933 case POLL_REGISTER: 2934 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2935 break; 2936 2937 case POLL_DEREGISTER: 2938 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2939 break; 2940 2941 case POLL_AND_CHECK_STATUS: 2942 case POLL_ONLY: 2943 status = CSR_READ_4(sc, JME_INTR_STATUS); 2944 2945 ether_input_chain_init(chain); 2946 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) 2947 prog += jme_rxeof_chain(sc, r, chain, count); 2948 if (prog) 2949 ether_input_dispatch(chain); 2950 2951 if (status & INTR_RXQ_DESC_EMPTY) { 2952 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 2953 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 2954 RXCSR_RX_ENB | RXCSR_RXQ_START); 2955 } 2956 2957 jme_txeof(sc); 2958 if (!ifq_is_empty(&ifp->if_snd)) 2959 if_devstart(ifp); 2960 break; 2961 } 2962 } 2963 2964 #endif /* DEVICE_POLLING */ 2965 2966 static int 2967 jme_rxring_dma_alloc(struct jme_softc *sc, int ring) 2968 { 2969 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2970 bus_dmamem_t dmem; 2971 int error; 2972 2973 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag, 2974 JME_RX_RING_ALIGN, 0, 2975 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2976 JME_RX_RING_SIZE(sc), 2977 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 2978 if (error) { 2979 device_printf(sc->jme_dev, 2980 "could not allocate %dth Rx ring.\n", ring); 2981 return error; 2982 } 2983 rdata->jme_rx_ring_tag = dmem.dmem_tag; 2984 rdata->jme_rx_ring_map = dmem.dmem_map; 2985 rdata->jme_rx_ring = dmem.dmem_addr; 2986 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr; 2987 2988 return 0; 2989 } 2990 2991 static int 2992 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring) 2993 { 2994 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring]; 2995 int i, error; 2996 2997 /* Create tag for Rx buffers. */ 2998 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 2999 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 3000 BUS_SPACE_MAXADDR, /* lowaddr */ 3001 BUS_SPACE_MAXADDR, /* highaddr */ 3002 NULL, NULL, /* filter, filterarg */ 3003 MCLBYTES, /* maxsize */ 3004 1, /* nsegments */ 3005 MCLBYTES, /* maxsegsize */ 3006 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */ 3007 &rdata->jme_rx_tag); 3008 if (error) { 3009 device_printf(sc->jme_dev, 3010 "could not create %dth Rx DMA tag.\n", ring); 3011 return error; 3012 } 3013 3014 /* Create DMA maps for Rx buffers. */ 3015 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3016 &rdata->jme_rx_sparemap); 3017 if (error) { 3018 device_printf(sc->jme_dev, 3019 "could not create %dth spare Rx dmamap.\n", ring); 3020 bus_dma_tag_destroy(rdata->jme_rx_tag); 3021 rdata->jme_rx_tag = NULL; 3022 return error; 3023 } 3024 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 3025 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i]; 3026 3027 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK, 3028 &rxd->rx_dmamap); 3029 if (error) { 3030 int j; 3031 3032 device_printf(sc->jme_dev, 3033 "could not create %dth Rx dmamap " 3034 "for %dth RX ring.\n", i, ring); 3035 3036 for (j = 0; j < i; ++j) { 3037 rxd = &rdata->jme_rxdesc[j]; 3038 bus_dmamap_destroy(rdata->jme_rx_tag, 3039 rxd->rx_dmamap); 3040 } 3041 bus_dmamap_destroy(rdata->jme_rx_tag, 3042 rdata->jme_rx_sparemap); 3043 bus_dma_tag_destroy(rdata->jme_rx_tag); 3044 rdata->jme_rx_tag = NULL; 3045 return error; 3046 } 3047 } 3048 return 0; 3049 } 3050 3051 static void 3052 jme_rx_intr(struct jme_softc *sc, uint32_t status) 3053 { 3054 struct mbuf_chain chain[MAXCPU]; 3055 int r, prog = 0; 3056 3057 ether_input_chain_init(chain); 3058 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) { 3059 if (status & jme_rx_status[r].jme_coal) 3060 prog += jme_rxeof_chain(sc, r, chain, -1); 3061 } 3062 if (prog) 3063 ether_input_dispatch(chain); 3064 } 3065 3066 static void 3067 jme_enable_rss(struct jme_softc *sc) 3068 { 3069 uint32_t rssc, key, ind; 3070 int i; 3071 3072 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt; 3073 3074 rssc = RSSC_HASH_64_ENTRY; 3075 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP; 3076 rssc |= sc->jme_rx_ring_inuse >> 1; 3077 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc); 3078 CSR_WRITE_4(sc, JME_RSSC, rssc); 3079 3080 key = 0x6d5a6d5a; /* XXX */ 3081 for (i = 0; i < RSSKEY_NREGS; ++i) 3082 CSR_WRITE_4(sc, RSSKEY_REG(i), key); 3083 3084 ind = 0; 3085 if (sc->jme_rx_ring_inuse == JME_NRXRING_2) { 3086 ind = 0x01000100; 3087 } else if (sc->jme_rx_ring_inuse == JME_NRXRING_4) { 3088 ind = 0x03020100; 3089 } else { 3090 panic("%s: invalid # of RX rings (%d)\n", 3091 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse); 3092 } 3093 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind); 3094 for (i = 0; i < RSSTBL_NREGS; ++i) 3095 CSR_WRITE_4(sc, RSSTBL_REG(i), ind); 3096 } 3097 3098 static void 3099 jme_disable_rss(struct jme_softc *sc) 3100 { 3101 sc->jme_rx_ring_inuse = JME_NRXRING_1; 3102 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 3103 } 3104