/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
/* Define the following to enable printing of Rx errors. */
#undef JME_SHOW_ERRORS

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);

static void	jme_intr(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *);

static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
				  bus_size_t, int);
static int	jme_init_rx_ring(struct jme_softc *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	  JME_CAP_JUMBO,
	  "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	  JME_CAP_FASTETH,
	  "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
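
/*
 * Both SMI accessors above share the same handshake: program JME_SMI
 * with SMI_OP_EXECUTE plus the PHY and register addresses (and, for a
 * write, the data), then poll in 1us steps for up to JME_PHY_TIMEOUT
 * iterations until the hardware clears SMI_OP_EXECUTE.  On timeout a
 * read returns 0 and a write is silently dropped.
 */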
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver has to reset its internal
	 * producer/consumer pointers and reclaim any allocated resources.
	 * Note that just saving the values of the JME_TXNDA and JME_RXNDA
	 * registers before stopping the MAC and restoring them afterwards
	 * is not sufficient to guarantee a correct MAC state, because
	 * stopping the MAC can take a while and the hardware may have
	 * updated JME_TXNDA/JME_RXNDA during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
		    JME_EEPROM_PAGE_BAR1) == fup) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
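
/*
 * EEPROM layout walked by jme_eeprom_macaddr() above: two signature
 * bytes (JME_EEPROM_SIG0/JME_EEPROM_SIG1) followed by descriptors of
 * JME_EEPROM_DESC_BYTES each.  A descriptor matching the FUNC0/BAR1
 * id is read as a (fup, reg, val) triple, and the station address is
 * assembled from the triples whose register offset falls inside
 * [JME_PAR0, JME_PAR0 + ETHER_ADDR_LEN): eaddr[reg - JME_PAR0] = val.
 */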
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
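
/*
 * Attach sequence: power the chip up to D0, map the register BAR and
 * the IRQ, extract the chip/FPGA revisions to select workarounds,
 * reset the controller, obtain the station address (EEPROM first,
 * station address registers as fallback), size the DMA bursts from
 * the PCIe device control register, allocate the descriptor rings and
 * finally hook up MII, the ifnet and the interrupt handler.
 */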
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it is a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->jme_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}
	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
			       &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
}
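
/*
 * bus_dma callback helpers.  jme_dmamap_ring_cb() is used for the ring
 * and status block loads, which are created with exactly one segment,
 * so it just records the segment address for the caller.
 * jme_dmamap_buf_cb() copies up to ctx->nsegs segments into the
 * caller-supplied array and reports overflow by setting ctx->nsegs
 * to 0.
 */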
static void
jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	*((bus_addr_t *)arg) = segs->ds_addr;
}

static void
jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
		  bus_size_t mapsz __unused, int error)
{
	struct jme_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t busaddr, lowaddr;
	int error, i;

	lowaddr = sc->jme_lowaddr;
again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
				 (void **)&sc->jme_rdata.jme_tx_ring,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->jme_cdata.jme_tx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
				sc->jme_rdata.jme_tx_ring,
				sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr = busaddr;
	/*
	 * Create DMA stuffs for RX ring
	 */

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
				 (void **)&sc->jme_rdata.jme_rx_ring,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->jme_cdata.jme_rx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
				sc->jme_rdata.jme_rx_ring,
				sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr = busaddr;
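
	/*
	 * The chip takes each ring base address as separate HI/LO words
	 * (e.g. JME_TXDBA_HI/JME_TXDBA_LO), so a ring must not cross a
	 * 4GB boundary: the upper 32 bits have to be identical for every
	 * descriptor in the ring.  The check below compares JME_ADDR_HI()
	 * of each ring's start and end addresses and, on a mismatch,
	 * redoes the allocation with the DMA address space limited to
	 * 32 bits.
	 */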
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		bus_addr_t rx_ring_end, tx_ring_end;

		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
			      JME_TX_RING_SIZE;
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
			      JME_RX_RING_SIZE;
		if ((JME_ADDR_HI(tx_ring_end) !=
		     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
				 (void **)&sc->jme_rdata.jme_ssb_block,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->jme_cdata.jme_ssb_map);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
				sc->jme_rdata.jme_ssb_block,
				sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr = busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
					  &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		return error;
	}
	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
				  &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
					   sc->jme_cdata.jme_rx_sparemap);
			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
				  sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
				sc->jme_rdata.jme_tx_ring,
				sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
				  sc->jme_cdata.jme_rx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
				sc->jme_rdata.jme_rx_ring,
				sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
					   txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
					   rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				   sc->jme_cdata.jme_rx_sparemap);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
				  sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
				sc->jme_rdata.jme_ssb_block,
				sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}
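
/*
 * Note that jme_dma_free() above tolerates partially completed
 * allocations: every tag is checked against NULL before its maps and
 * memory are torn down.  This lets the attach error path simply call
 * jme_detach(), and thereby jme_dma_free(), no matter how far
 * jme_dma_alloc() got.
 */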
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link consumes
 * more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as we
 * have no control after powering off.  If the renegotiation fails, WOL
 * may not work.  Running at 1Gbps draws more power than the 375mA at
 * 3.3V specified in the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	lwkt_serialize_enter(ifp->if_serializer);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
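
/*
 * Map an outgoing frame into TX descriptors.  The segment budget
 * always keeps at least JME_TXD_RSVD descriptors in reserve; if the
 * mbuf chain is too fragmented for that budget (EFBIG, or the load
 * callback reporting zero segments), it is defragmented once with
 * m_defrag() and loaded again before giving up.
 */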
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs;
	int error, i, prod;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - 1),
		("not enough segments %d\n", maxsegs));

	ctx.nsegs = maxsegs;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
				     *m_head, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;

		ctx.nsegs = maxsegs;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
					     txd->tx_dmamap, *m_head,
					     jme_dmamap_buf_cb, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not load defragged TX mbuf\n");
			if (!error) {
				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
						  txd->tx_dmamap);
				error = EFBIG;
			}
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use the 64bit TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use the 32bit TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);

	txd->tx_ndesc = 1 - i;
	for (; i < ctx.nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += ctx.nsegs;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
	return (0);
}
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check the number of available TX descs; always
		 * leave JME_TXD_RSVD TX descs free.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifq_prepend(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability
			 * of Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
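
/*
 * Reprogram the MACs for the resolved media.  Full-duplex operation
 * turns off collision detection/handling and the retry transmit
 * timer/limit; half-duplex turns them on.  Two chip workarounds hang
 * off this path as well: JME_WA_EXTFIFO selects an extended PHY FIFO
 * depth at 100baseTX to avoid CRC errors on chips before JMC250B, and
 * JME_WA_HDX sets GPREG1_WA_HDX for half-duplex 10/100 operation.
 */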
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under heavy
			 * load, so cache the RXCSR value and write the ORed
			 * value with the kick command to RXCSR.  This saves
			 * one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
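
/*
 * Reclaim TX descriptors for frames the hardware has finished with.
 * The ring map is synced POSTREAD before the scan and PREWRITE after
 * the reclaimed descriptors' flags have been cleared for reuse.
 */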

static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chain of buffers for the transmitted frame.  In
		 * other words, the JME_TD_OWN bit is only valid at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREWRITE);
}

static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}
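
/*
 * Illustrative sketch (not part of the original driver): jme_rxpkt()
 * below fixes up the mbuf lengths of a multi-segment frame.  Assuming
 * MCLBYTES == 2048 and JME_RX_PAD_BYTES == 10, a frame the hardware
 * reports as 5000 bytes occupies nsegs = howmany(5000, 2048) = 3
 * clusters, and the per-mbuf lengths resolve as in this hypothetical
 * worked example.
 */
#if 0
static void
jme_rx_seglen_example(void)
{
	int pktlen = 5000;				/* JME_RX_BYTES() */
	int rxlen = pktlen - JME_RX_PAD_BYTES;		/* 4990 */
	int nsegs = howmany(pktlen, MCLBYTES);		/* 3 */
	int first = MCLBYTES - JME_RX_PAD_BYTES;	/* 2038 */
	int last = rxlen - (first + MCLBYTES * (nsegs - 2)); /* 904 */

	/* 2038 + 2048 + 904 == 4990 == rxlen */
	KKASSERT(first + MCLBYTES * (nsegs - 2) + last == rxlen);
}
#endif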

/* Receive a frame. */
static void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
			      __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse the buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full-sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * The receive processor can receive a maximum
			 * frame size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding which
			 * is used to align the IP header on a 32bit
			 * boundary.  Also note that the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			ifp->if_input(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
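
/*
 * Illustrative sketch (not part of the original driver): the sanity
 * check in jme_rxeof() below relies on nsegs matching
 * howmany(pktlen, MCLBYTES).  Assuming MCLBYTES == 2048, a 1514 byte
 * frame must occupy exactly one cluster and a 5000 byte frame exactly
 * three, as this hypothetical snippet spells out.
 */
#if 0
static void
jme_rx_nsegs_example(void)
{
	KKASSERT(howmany(1514, MCLBYTES) == 1);	/* (1514 + 2047) / 2048 */
	KKASSERT(howmany(5000, MCLBYTES) == 3);	/* (5000 + 2047) / 2048 */
}
#endif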

static void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update Rx descriptors.
		 * I'm not sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
				  "and packet size(%d) mismatch\n",
				  nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

static void
jme_init(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header),
		MCLBYTES) + 1;
	KKASSERT(sc->jme_txd_spare >= 2);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "%s: initialization failed: no memory for Rx buffers.\n",
		    __func__);
		jme_stop(sc);
		return;
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
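
	/*
	 * Illustrative sketch (not part of the original driver):
	 * JME_ADDR_HI()/JME_ADDR_LO() split a 64bit bus address into
	 * the two 32bit halves programmed above, e.g. for a
	 * hypothetical ring address of 0x123456780:
	 */
#if 0
	KKASSERT(JME_ADDR_HI(0x123456780ULL) == 0x1);
	KKASSERT(JME_ADDR_LO(0x123456780ULL) == 0x23456780);
#endif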

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So decrease
	 * the FIFO threshold to reduce the FIFO overruns for frames
	 * larger than 4000 bytes.
	 * For the best performance of standard MTU sized frames use the
	 * maximum allowable FIFO threshold, 128QW.
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
	    JME_RX_FIFO_SIZE)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
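
	/*
	 * Illustrative worked example (not part of the original driver),
	 * assuming ETHER_HDR_LEN == 14, EVL_ENCAPLEN == 4,
	 * ETHER_CRC_LEN == 4 and JME_RX_FIFO_SIZE == 4K: a standard
	 * 1500 byte MTU yields 1500 + 14 + 4 + 4 = 1522 bytes on the
	 * wire, which fits the FIFO, so the 128QW threshold is kept; a
	 * jumbo MTU such as 9000 exceeds it and drops to 16QW.
	 */
#if 0
	KKASSERT(1500 + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN <=
	    JME_RX_FIFO_SIZE);
	KKASSERT(9000 + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN >
	    JME_RX_FIFO_SIZE);
#endif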

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame.  This will
	 * greatly help Rx performance on strict-alignment architectures
	 * as the driver does not need to copy the frame to align the
	 * payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0. */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read the
	 * JME_INTR_STATUS register in jme_intr.  Also it seems hard to
	 * synchronize the interrupt status between hardware and
	 * software with shadow posting due to the requirements of
	 * bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	jme_set_tx_coal(sc);

	/* Configure Rx queue 0 packet completion coalescing. */
	jme_set_rx_coal(sc);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = device_get_softc(sc->jme_miibus);
	mii_mediachg(mii);

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
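
/*
 * Illustrative sketch (not part of the original driver): the TXTRHD
 * value programmed in jme_init() packs two fields with the usual
 * (value << SHIFT) & MASK idiom.  This hypothetical helper shows the
 * same packing for arbitrary period/limit values.
 */
#if 0
static uint32_t
jme_txtrhd_pack_example(uint32_t period, uint32_t limit)
{
	return (((period << TXTRHD_RT_PERIOD_SHIFT) & TXTRHD_RT_PERIOD_MASK) |
	    ((limit << TXTRHD_RT_LIMIT_SHIFT) & TXTRHD_RT_LIMIT_MASK));
}
#endif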

static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished RX segments.
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}

static void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREWRITE);
}

static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
	    BUS_DMASYNC_PREWRITE);
}

static int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
		 sc->jme_cdata.jme_rxtail == NULL &&
		 sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd, 1);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
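
/*
 * Illustrative sketch (not part of the original driver): jme_newbuf()
 * below loads each fresh mbuf into a dedicated spare DMA map first,
 * so a load failure leaves the old buffer and its mapping intact.
 * Only after a successful load are the maps swapped, as this
 * hypothetical outline shows.
 */
#if 0
static void
jme_sparemap_swap_outline(struct jme_rxdesc *rxd, bus_dmamap_t *sparemap)
{
	bus_dmamap_t tmp;

	/* On success: the descriptor takes the spare, the old map becomes
	 * the new spare. */
	tmp = rxd->rx_dmamap;
	rxd->rx_dmamap = *sparemap;
	*sparemap = tmp;
}
#endif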

static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * The JMC250 has a 64bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10 byte padding
	 * feature in order not to copy the entire frame to align the
	 * IP header on a 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &segs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
				     sc->jme_cdata.jme_rx_sparemap,
				     m, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
					  sc->jme_cdata.jme_rx_sparemap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}
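
/*
 * Illustrative sketch (not part of the original driver):
 * jme_set_filter() below hashes each multicast address with
 * ether_crc32_be() and keeps the low-order 6 bits, so bit 5 picks one
 * of the two 32bit MAR registers and bits 0-4 pick the bit inside it.
 * A hypothetical CRC of 0x2a (101010b) would therefore set bit 10 of
 * MAR1.
 */
#if 0
static void
jme_mchash_example(uint32_t mchash[2])
{
	uint32_t crc = 0x2a;	/* hypothetical ether_crc32_be() & 0x3f */

	mchash[crc >> 5] |= 1 << (crc & 0x1f);	/* mchash[1] |= 1 << 10 */
}
#endif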

static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		if (ifp->if_flags & IFF_ALLMULTI)
			rxcfg |= RXMAC_ALLMULTI;
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}

static int
jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_to) {
		sc->jme_tx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_pkt) {
		sc->jme_tx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_rx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_to) {
		sc->jme_rx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_rx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_pkt) {
		sc->jme_rx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}
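
/*
 * Illustrative sketch (not part of the original driver): the four
 * sysctl handlers above share one pattern: copy the current value,
 * let sysctl_handle_int() apply the user's input, range-check it, and
 * only reprogram the hardware when the value actually changed while
 * the interface is running.  The hypothetical helper below isolates
 * the range check.
 */
#if 0
static int
jme_coal_range_check_example(int v, int min, int max)
{
	return ((v < min || v > max) ? EINVAL : 0);
}
#endif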

static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);
}