1 /*- 2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 * 27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $ 28 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $ 29 */ 30 31 #include <sys/param.h> 32 #include <sys/endian.h> 33 #include <sys/kernel.h> 34 #include <sys/bus.h> 35 #include <sys/interrupt.h> 36 #include <sys/malloc.h> 37 #include <sys/proc.h> 38 #include <sys/rman.h> 39 #include <sys/serialize.h> 40 #include <sys/socket.h> 41 #include <sys/sockio.h> 42 #include <sys/sysctl.h> 43 44 #include <net/ethernet.h> 45 #include <net/if.h> 46 #include <net/bpf.h> 47 #include <net/if_arp.h> 48 #include <net/if_dl.h> 49 #include <net/if_media.h> 50 #include <net/ifq_var.h> 51 #include <net/vlan/if_vlan_var.h> 52 #include <net/vlan/if_vlan_ether.h> 53 54 #include <dev/netif/mii_layer/miivar.h> 55 #include <dev/netif/mii_layer/jmphyreg.h> 56 57 #include <bus/pci/pcireg.h> 58 #include <bus/pci/pcivar.h> 59 #include <bus/pci/pcidevs.h> 60 61 #include <dev/netif/jme/if_jmereg.h> 62 #include <dev/netif/jme/if_jmevar.h> 63 64 #include "miibus_if.h" 65 66 /* Define the following to disable printing Rx errors. 
 */
#undef	JME_SHOW_ERRORS

/* Checksum offload features this driver can hand to the hardware. */
#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Device interface. */
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

/* MII bus interface. */
static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

/* ifnet entry points. */
static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);

/* Interrupt handling and descriptor ring processing. */
static void	jme_intr(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *);

/* DMA resource management. */
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *, int);
static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
				  bus_size_t, int);
static int	jme_init_rx_ring(struct jme_softc *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *);

/* Chip control helpers. */
static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);

/* Sysctl support. */
static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;	/* PCI vendor ID */
	uint16_t	jme_deviceid;	/* PCI device ID */
	uint32_t	jme_caps;	/* JME_CAP_* capability flags */
	const char	*jme_name;	/* probe description string */
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface.
	 */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

/* Descriptor ring sizes; overridable via loader tunables below. */
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);

/*
 * Read a PHY register on the MII of the JMC250.
 *
 * Returns the register value, or 0 on address mismatch or when the
 * SMI transaction does not complete within JME_PHY_TIMEOUT polls.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	/* Kick off the SMI read and poll for command completion. */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 *
 * Always returns 0; a timeout is reported on the console but not to
 * the caller (matching the miibus_writereg contract used here).
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	/* Issue the SMI write and poll for command completion. */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 * Callback from MII layer when media changes.
 *
 * Stops the MAC, reclaims all in-flight Tx/Rx resources, resets the
 * ring state, and (if a link was resolved) reprograms and restarts
 * the MAC.  See the NOTE below for why a full stop/reinit is needed.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	/* Recompute link state; 1000baseT only counts on GigE parts. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain completed Rx and free any partially assembled chain. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
403 */ 404 static int 405 jme_mediachange(struct ifnet *ifp) 406 { 407 struct jme_softc *sc = ifp->if_softc; 408 struct mii_data *mii = device_get_softc(sc->jme_miibus); 409 int error; 410 411 ASSERT_SERIALIZED(ifp->if_serializer); 412 413 if (mii->mii_instance != 0) { 414 struct mii_softc *miisc; 415 416 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 417 mii_phy_reset(miisc); 418 } 419 error = mii_mediachg(mii); 420 421 return (error); 422 } 423 424 static int 425 jme_probe(device_t dev) 426 { 427 const struct jme_dev *sp; 428 uint16_t vid, did; 429 430 vid = pci_get_vendor(dev); 431 did = pci_get_device(dev); 432 for (sp = jme_devs; sp->jme_name != NULL; ++sp) { 433 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) { 434 struct jme_softc *sc = device_get_softc(dev); 435 436 sc->jme_caps = sp->jme_caps; 437 device_set_desc(dev, sp->jme_name); 438 return (0); 439 } 440 } 441 return (ENXIO); 442 } 443 444 static int 445 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) 446 { 447 uint32_t reg; 448 int i; 449 450 *val = 0; 451 for (i = JME_TIMEOUT; i > 0; i--) { 452 reg = CSR_READ_4(sc, JME_SMBCSR); 453 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) 454 break; 455 DELAY(1); 456 } 457 458 if (i == 0) { 459 device_printf(sc->jme_dev, "EEPROM idle timeout!\n"); 460 return (ETIMEDOUT); 461 } 462 463 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; 464 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); 465 for (i = JME_TIMEOUT; i > 0; i--) { 466 DELAY(1); 467 reg = CSR_READ_4(sc, JME_SMBINTF); 468 if ((reg & SMBINTF_CMD_TRIGGER) == 0) 469 break; 470 } 471 472 if (i == 0) { 473 device_printf(sc->jme_dev, "EEPROM read timeout!\n"); 474 return (ETIMEDOUT); 475 } 476 477 reg = CSR_READ_4(sc, JME_SMBINTF); 478 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; 479 480 return (0); 481 } 482 483 static int 484 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 485 { 486 uint8_t fup, reg, val; 487 
uint32_t offset; 488 int match; 489 490 offset = 0; 491 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 492 fup != JME_EEPROM_SIG0) 493 return (ENOENT); 494 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 495 fup != JME_EEPROM_SIG1) 496 return (ENOENT); 497 match = 0; 498 do { 499 if (jme_eeprom_read_byte(sc, offset, &fup) != 0) 500 break; 501 /* Check for the end of EEPROM descriptor. */ 502 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END) 503 break; 504 if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, 505 JME_EEPROM_PAGE_BAR1) == fup) { 506 if (jme_eeprom_read_byte(sc, offset + 1, ®) != 0) 507 break; 508 if (reg >= JME_PAR0 && 509 reg < JME_PAR0 + ETHER_ADDR_LEN) { 510 if (jme_eeprom_read_byte(sc, offset + 2, 511 &val) != 0) 512 break; 513 eaddr[reg - JME_PAR0] = val; 514 match++; 515 } 516 } 517 /* Try next eeprom descriptor. */ 518 offset += JME_EEPROM_DESC_BYTES; 519 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END); 520 521 if (match == ETHER_ADDR_LEN) 522 return (0); 523 524 return (ENOENT); 525 } 526 527 static void 528 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 529 { 530 uint32_t par0, par1; 531 532 /* Read station address. */ 533 par0 = CSR_READ_4(sc, JME_PAR0); 534 par1 = CSR_READ_4(sc, JME_PAR1); 535 par1 &= 0xFFFF; 536 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) { 537 device_printf(sc->jme_dev, 538 "generating fake ethernet address.\n"); 539 par0 = karc4random(); 540 /* Set OUI to JMicron. 
		 */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		/* PAR0/PAR1 hold the address in little-endian byte order. */
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

/*
 * Device attach: size the descriptor rings, map BAR/IRQ resources,
 * identify the chip revision, fetch the MAC address, allocate DMA
 * resources, attach the PHY and register the ifnet.  On any failure
 * jme_detach() is invoked to unwind partial state.
 */
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	/* Clamp tunable ring sizes to JME_NDESC_ALIGN/JME_NDESC_MAX. */
	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		/* Save config registers clobbered by the power transition. */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->jme_irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	/* Apply per-revision workarounds / DMA address limits. */
	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address: try the EEPROM first, then registers. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		/* Match Tx DMA burst size to the PCIe max read request. */
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
	    &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	/* jme_detach() tolerates a partially initialized softc. */
	jme_detach(dev);
	return (error);
}

/*
 * Device detach: stop the chip, tear down the interrupt, and release
 * sysctl, miibus, IRQ, memory and DMA resources.  Safe to call from
 * the jme_attach() failure path.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
		    sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc, 1);

	return (0);
}

/*
 * Create the hw.<dev> sysctl subtree exposing the interrupt
 * coalescing knobs and (read-only) ring sizes, then establish
 * default coalescing parameters.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0,
	    jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
	    0, "TX desc count");

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case that the number of TX/RX
	 * descs are set to small values by users.
	 *
	 * NOTE: coal_max will not be zero, since number of descs
	 * must aligned by JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}

/*
 * bus_dmamap_load() callback for ring memory: the ring tags are
 * created with nsegments == 1, so exactly one segment is expected;
 * its bus address is stored through the bus_addr_t pointer in arg.
 */
static void
jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	*((bus_addr_t *)arg) = segs->ds_addr;
}

/*
 * bus_dmamap_load callback for mbuf buffers: copy the returned
 * segment list into the caller's jme_dmamap_ctx.  If the mapping
 * produced more segments than the caller allowed, signal failure
 * by setting ctx->nsegs to 0.
 */
static void
jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
		  bus_size_t mapsz __unused, int error)
{
	struct jme_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

/*
 * Allocate all DMA tags, descriptor rings and maps for the device.
 * If the rings land across a 4GB boundary with the wide address
 * limit, everything is freed and the allocation is retried with a
 * 32-bit lowaddr (see the "again" label).
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t busaddr, lowaddr;
	int error, i;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->jme_cdata.jme_rxdesc =
	    kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);

	lowaddr = sc->jme_lowaddr;
again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE(sc),	/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE(sc),	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Tx ring.
	 */
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_rdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr = busaddr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE(sc),	/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE(sc),	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Rx ring.
	 */
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_rdata.jme_rx_ring,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr = busaddr;

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		bus_addr_t rx_ring_end, tx_ring_end;

		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
		    JME_TX_RING_SIZE(sc);
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
		    JME_RX_RING_SIZE(sc);
		if ((JME_ADDR_HI(tx_ring_end) !=
		     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc, 0);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}

	/* Create parent buffer tag.
	 */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for shared status block.
*/ 1155 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag, 1156 (void **)&sc->jme_rdata.jme_ssb_block, 1157 BUS_DMA_WAITOK | BUS_DMA_ZERO, 1158 &sc->jme_cdata.jme_ssb_map); 1159 if (error) { 1160 device_printf(sc->jme_dev, "could not allocate DMA'able " 1161 "memory for shared status block.\n"); 1162 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); 1163 sc->jme_cdata.jme_ssb_tag = NULL; 1164 return error; 1165 } 1166 1167 /* Load the DMA map for shared status block */ 1168 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag, 1169 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block, 1170 JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT); 1171 if (error) { 1172 device_printf(sc->jme_dev, "could not load DMA'able memory " 1173 "for shared status block.\n"); 1174 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag, 1175 sc->jme_rdata.jme_ssb_block, 1176 sc->jme_cdata.jme_ssb_map); 1177 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); 1178 sc->jme_cdata.jme_ssb_tag = NULL; 1179 return error; 1180 } 1181 sc->jme_rdata.jme_ssb_block_paddr = busaddr; 1182 1183 /* 1184 * Create DMA stuffs for TX buffers 1185 */ 1186 1187 /* Create tag for Tx buffers. */ 1188 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1189 1, 0, /* algnmnt, boundary */ 1190 sc->jme_lowaddr, /* lowaddr */ 1191 BUS_SPACE_MAXADDR, /* highaddr */ 1192 NULL, NULL, /* filter, filterarg */ 1193 JME_TSO_MAXSIZE, /* maxsize */ 1194 JME_MAXTXSEGS, /* nsegments */ 1195 JME_TSO_MAXSEGSIZE, /* maxsegsize */ 1196 0, /* flags */ 1197 &sc->jme_cdata.jme_tx_tag); 1198 if (error != 0) { 1199 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); 1200 return error; 1201 } 1202 1203 /* Create DMA maps for Tx buffers. 
*/ 1204 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 1205 txd = &sc->jme_cdata.jme_txdesc[i]; 1206 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0, 1207 &txd->tx_dmamap); 1208 if (error) { 1209 int j; 1210 1211 device_printf(sc->jme_dev, 1212 "could not create %dth Tx dmamap.\n", i); 1213 1214 for (j = 0; j < i; ++j) { 1215 txd = &sc->jme_cdata.jme_txdesc[j]; 1216 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag, 1217 txd->tx_dmamap); 1218 } 1219 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag); 1220 sc->jme_cdata.jme_tx_tag = NULL; 1221 return error; 1222 } 1223 } 1224 1225 /* 1226 * Create DMA stuffs for RX buffers 1227 */ 1228 1229 /* Create tag for Rx buffers. */ 1230 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1231 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 1232 sc->jme_lowaddr, /* lowaddr */ 1233 BUS_SPACE_MAXADDR, /* highaddr */ 1234 NULL, NULL, /* filter, filterarg */ 1235 MCLBYTES, /* maxsize */ 1236 1, /* nsegments */ 1237 MCLBYTES, /* maxsegsize */ 1238 0, /* flags */ 1239 &sc->jme_cdata.jme_rx_tag); 1240 if (error) { 1241 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n"); 1242 return error; 1243 } 1244 1245 /* Create DMA maps for Rx buffers. 
*/
	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap.\n", i);

			/* Unwind: destroy the maps created so far, the
			 * spare map, then the tag itself. */
			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

/*
 * Release every DMA tag/map/memory region created by jme_dma_alloc():
 * Tx/Rx descriptor rings, per-descriptor Tx/Rx buffer maps, the spare
 * Rx map, the shadow status block, and finally the parent tags.
 * When 'detach' is non-zero the softc-side txdesc/rxdesc arrays are
 * kfree()d as well (full detach rather than a retry of the allocation,
 * e.g. the 4GB-boundary fallback path passes detach == 0).
 */
static void
jme_dma_free(struct jme_softc *sc, int detach)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring: unload, free DMA memory, destroy tag. */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_rdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring: same teardown sequence as the Tx ring. */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_rdata.jme_rx_ring,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}

	/* Tx buffers: destroy each per-descriptor map, then the tag. */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
		    sc->jme_cdata.jme_rx_sparemap);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_rdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags last, after all children are destroyed. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (detach) {
		if (sc->jme_cdata.jme_txdesc != NULL) {
			kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
			sc->jme_cdata.jme_txdesc = NULL;
		}
		if (sc->jme_cdata.jme_rxdesc != NULL) {
			kfree(sc->jme_cdata.jme_rxdesc, M_DEVBUF);
			sc->jme_cdata.jme_rxdesc = NULL;
		}
	}
}

/*
 * Make sure the interface is stopped at reboot time.
1367 */ 1368 static int 1369 jme_shutdown(device_t dev) 1370 { 1371 return jme_suspend(dev); 1372 } 1373 1374 #ifdef notyet 1375 /* 1376 * Unlike other ethernet controllers, JMC250 requires 1377 * explicit resetting link speed to 10/100Mbps as gigabit 1378 * link will cunsume more power than 375mA. 1379 * Note, we reset the link speed to 10/100Mbps with 1380 * auto-negotiation but we don't know whether that operation 1381 * would succeed or not as we have no control after powering 1382 * off. If the renegotiation fail WOL may not work. Running 1383 * at 1Gbps draws more power than 375mA at 3.3V which is 1384 * specified in PCI specification and that would result in 1385 * complete shutdowning power to ethernet controller. 1386 * 1387 * TODO 1388 * Save current negotiated media speed/duplex/flow-control 1389 * to softc and restore the same link again after resuming. 1390 * PHY handling such as power down/resetting to 100Mbps 1391 * may be better handled in suspend method in phy driver. 1392 */ 1393 static void 1394 jme_setlinkspeed(struct jme_softc *sc) 1395 { 1396 struct mii_data *mii; 1397 int aneg, i; 1398 1399 JME_LOCK_ASSERT(sc); 1400 1401 mii = device_get_softc(sc->jme_miibus); 1402 mii_pollstat(mii); 1403 aneg = 0; 1404 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1405 switch IFM_SUBTYPE(mii->mii_media_active) { 1406 case IFM_10_T: 1407 case IFM_100_TX: 1408 return; 1409 case IFM_1000_T: 1410 aneg++; 1411 default: 1412 break; 1413 } 1414 } 1415 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0); 1416 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR, 1417 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1418 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, 1419 BMCR_AUTOEN | BMCR_STARTNEG); 1420 DELAY(1000); 1421 if (aneg != 0) { 1422 /* Poll link state until jme(4) get a 10/100 link. 
*/ 1423 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1424 mii_pollstat(mii); 1425 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1426 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1427 case IFM_10_T: 1428 case IFM_100_TX: 1429 jme_mac_config(sc); 1430 return; 1431 default: 1432 break; 1433 } 1434 } 1435 JME_UNLOCK(sc); 1436 pause("jmelnk", hz); 1437 JME_LOCK(sc); 1438 } 1439 if (i == MII_ANEGTICKS_GIGE) 1440 device_printf(sc->jme_dev, "establishing link failed, " 1441 "WOL may not work!"); 1442 } 1443 /* 1444 * No link, force MAC to have 100Mbps, full-duplex link. 1445 * This is the last resort and may/may not work. 1446 */ 1447 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1448 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1449 jme_mac_config(sc); 1450 } 1451 1452 static void 1453 jme_setwol(struct jme_softc *sc) 1454 { 1455 struct ifnet *ifp = &sc->arpcom.ac_if; 1456 uint32_t gpr, pmcs; 1457 uint16_t pmstat; 1458 int pmc; 1459 1460 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1461 /* No PME capability, PHY power down. */ 1462 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1463 MII_BMCR, BMCR_PDOWN); 1464 return; 1465 } 1466 1467 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1468 pmcs = CSR_READ_4(sc, JME_PMCS); 1469 pmcs &= ~PMCS_WOL_ENB_MASK; 1470 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1471 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1472 /* Enable PME message. */ 1473 gpr |= GPREG0_PME_ENB; 1474 /* For gigabit controllers, reset link speed to 10/100. */ 1475 if ((sc->jme_caps & JME_CAP_FASTETH) == 0) 1476 jme_setlinkspeed(sc); 1477 } 1478 1479 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1480 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1481 1482 /* Request PME. 
*/ 1483 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); 1484 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1485 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1486 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1487 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1488 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1489 /* No WOL, PHY power down. */ 1490 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 1491 MII_BMCR, BMCR_PDOWN); 1492 } 1493 } 1494 #endif 1495 1496 static int 1497 jme_suspend(device_t dev) 1498 { 1499 struct jme_softc *sc = device_get_softc(dev); 1500 struct ifnet *ifp = &sc->arpcom.ac_if; 1501 1502 lwkt_serialize_enter(ifp->if_serializer); 1503 jme_stop(sc); 1504 #ifdef notyet 1505 jme_setwol(sc); 1506 #endif 1507 lwkt_serialize_exit(ifp->if_serializer); 1508 1509 return (0); 1510 } 1511 1512 static int 1513 jme_resume(device_t dev) 1514 { 1515 struct jme_softc *sc = device_get_softc(dev); 1516 struct ifnet *ifp = &sc->arpcom.ac_if; 1517 #ifdef notyet 1518 int pmc; 1519 #endif 1520 1521 lwkt_serialize_enter(ifp->if_serializer); 1522 1523 #ifdef notyet 1524 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1525 uint16_t pmstat; 1526 1527 pmstat = pci_read_config(sc->jme_dev, 1528 pmc + PCIR_POWER_STATUS, 2); 1529 /* Disable PME clear PME status. 
*/
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Map an outgoing mbuf chain into the Tx descriptor ring.
 *
 * On success the frame's descriptors are filled in, the producer index
 * is advanced and ownership of the first descriptor is handed to the
 * hardware.  On failure the mbuf is freed and *m_head is set to NULL.
 * Returns 0 or an errno (EFBIG/ENOBUFS/...).
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/*
	 * In 64bit DMA mode the first descriptor of a chain is a
	 * "symbol" descriptor carrying no payload; account for it.
	 */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
	    ("not enough segments %d\n", maxsegs));

	ctx.nsegs = maxsegs;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    *m_head, jme_dmamap_buf_cb, &ctx,
	    BUS_DMA_NOWAIT);
	/* Callback reports zero segments when it ran out of room. */
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		/* Too fragmented: defragment the chain and retry once. */
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			if_printf(&sc->arpcom.ac_if,
			    "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto fail;
		}
		*m_head = m;

		ctx.nsegs = maxsegs;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head,
		    jme_dmamap_buf_cb, &ctx,
		    BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
			    "could not load defragged TX mbuf\n");
			if (!error) {
				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				error = EFBIG;
			}
			goto fail;
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
		goto fail;
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	/* First descriptor's addr_hi carries the total packet length. */
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt < sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	/* Compensates so tx_ndesc ends at ctx.nsegs (+1 in 64bit mode). */
	txd->tx_ndesc = 1 - i;
	for (; i < ctx.nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += ctx.nsegs;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}

/*
 * if_start handler: drain the interface send queue into the Tx ring,
 * then kick the transmitter once for all enqueued frames.
 * Called with the interface serializer held.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		/* No link: drop everything queued. */
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim completed descriptors if the ring is getting full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
*/
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

/*
 * Tx watchdog: fires when a transmit has not completed within
 * JME_TX_TIMEOUT.  Distinguishes lost-link, missed-interrupt and
 * genuine hang cases; the latter re-initializes the chip.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/* All descriptors completed; we merely missed an interrupt. */
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Interface ioctl handler: MTU changes, interface flags, multicast
 * filter updates, media selection and capability toggles.
 * Called with the interface serializer held.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU only when the chip advertises JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only re-filter on PROMISC/ALLMULTI change. */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * Reprogram the MAC (GHC/RXMAC/TXMAC/TXPFC and workaround registers)
 * according to the speed/duplex that the PHY negotiated.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Reset the MAC before reconfiguring it. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision detection/backoff needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only parts cannot do gigabit. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply chip-revision specific workarounds, if flagged. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

/*
 * Interrupt handler: mask interrupts, dispatch Rx/Tx completion work,
 * then unmask.  Runs under the interface serializer.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	/* 0xFFFFFFFF usually means the device is gone (bus read fault). */
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts.
*/
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			/* Restart transmission with the freed descriptors. */
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Reclaim transmitted frames: walk the Tx ring from the consumer index,
 * free completed mbufs and their DMA maps, and clear OACTIVE when
 * enough descriptors have been freed.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Ring empty: disarm the watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREWRITE);
}

/*
 * Hand 'count' Rx descriptors starting at 'cons' back to the hardware
 * unchanged, reusing their existing buffers (e.g. after an Rx error
 * or an mbuf allocation failure).
 */
static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
	}
}

/* Receive a frame.
 */
static void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	/* The first descriptor of the frame carries flags/status for all. */
	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		/* Drop the frame; give its descriptors back to the chip. */
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
			      __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/*
				 * Only trust the TCP/UDP checksum bits on
				 * non-fragmented packets.
				 */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			ifp->if_input(ifp, m);

			/* Reset mbuf chains.
*/ 2266 JME_RXCHAIN_RESET(sc); 2267 } 2268 } 2269 2270 sc->jme_cdata.jme_rx_cons += nsegs; 2271 sc->jme_cdata.jme_rx_cons %= sc->jme_rx_desc_cnt; 2272 } 2273 2274 static void 2275 jme_rxeof(struct jme_softc *sc) 2276 { 2277 struct jme_desc *desc; 2278 int nsegs, prog, pktlen; 2279 2280 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, 2281 sc->jme_cdata.jme_rx_ring_map, 2282 BUS_DMASYNC_POSTREAD); 2283 2284 prog = 0; 2285 for (;;) { 2286 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons]; 2287 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2288 break; 2289 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2290 break; 2291 2292 /* 2293 * Check number of segments against received bytes. 2294 * Non-matching value would indicate that hardware 2295 * is still trying to update Rx descriptors. I'm not 2296 * sure whether this check is needed. 2297 */ 2298 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2299 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2300 if (nsegs != howmany(pktlen, MCLBYTES)) { 2301 if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) " 2302 "and packet size(%d) mismach\n", 2303 nsegs, pktlen); 2304 break; 2305 } 2306 2307 /* Received a frame. */ 2308 jme_rxpkt(sc); 2309 prog++; 2310 } 2311 2312 if (prog > 0) { 2313 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, 2314 sc->jme_cdata.jme_rx_ring_map, 2315 BUS_DMASYNC_PREWRITE); 2316 } 2317 } 2318 2319 static void 2320 jme_tick(void *xsc) 2321 { 2322 struct jme_softc *sc = xsc; 2323 struct ifnet *ifp = &sc->arpcom.ac_if; 2324 struct mii_data *mii = device_get_softc(sc->jme_miibus); 2325 2326 lwkt_serialize_enter(ifp->if_serializer); 2327 2328 mii_tick(mii); 2329 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2330 2331 lwkt_serialize_exit(ifp->if_serializer); 2332 } 2333 2334 static void 2335 jme_reset(struct jme_softc *sc) 2336 { 2337 #ifdef foo 2338 /* Stop receiver, transmitter. 
 */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

/*
 * Bring the interface up: stop/reset the chip, initialize the Tx/Rx
 * rings and the shadow status block, program MAC address, Tx/Rx queue
 * parameters, receive filter and interrupt coalescing, then kick off
 * media selection and the periodic timer.  Called with the interface
 * serializer held.
 */
static void
jme_init(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Worst-case number of Tx descriptors a single maximum-sized
	 * frame can consume (one per MCLBYTES-sized cluster).
	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
	KKASSERT(sc->jme_txd_spare >= 1);

	/*
	 * If we use 64bit address mode for transmitting, each Tx request
	 * needs one more symbol descriptor.
	 */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		sc->jme_txd_spare += 1;

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "%s: initialization failed: no memory for Rx buffers.\n",
		    __func__);
		jme_stop(sc);
		return;
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
	/*
	 * Since Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns. So
	 * decrease FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use
	 * maximum allowable FIFO threshold, 128QW.
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
	    JME_RX_FIFO_SIZE)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere normal Rx
	 * operation. Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10bytes right before received frame. This will greatly
	 * help Rx performance on strict-alignment architectures as
	 * it does not need to copy the frame to align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read
	 * JME_INTR_STATUS register in jme_intr.
Also it seems 2502 * that it's hard to synchronize interrupt status between 2503 * hardware and software with shadow posting due to 2504 * requirements of bus_dmamap_sync(9). 2505 */ 2506 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2507 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2508 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2509 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2510 /* Disable posting of DW0. */ 2511 reg &= ~GPREG0_POST_DW0_ENB; 2512 /* Clear PME message. */ 2513 reg &= ~GPREG0_PME_ENB; 2514 /* Set PHY address. */ 2515 reg &= ~GPREG0_PHY_ADDR_MASK; 2516 reg |= sc->jme_phyaddr; 2517 CSR_WRITE_4(sc, JME_GPREG0, reg); 2518 2519 /* Configure Tx queue 0 packet completion coalescing. */ 2520 jme_set_tx_coal(sc); 2521 2522 /* Configure Rx queue 0 packet completion coalescing. */ 2523 jme_set_rx_coal(sc); 2524 2525 /* Configure shadow status block but don't enable posting. */ 2526 paddr = sc->jme_rdata.jme_ssb_block_paddr; 2527 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2528 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2529 2530 /* Disable Timer 1 and Timer 2. */ 2531 CSR_WRITE_4(sc, JME_TIMER1, 0); 2532 CSR_WRITE_4(sc, JME_TIMER2, 0); 2533 2534 /* Configure retry transmit period, retry limit value. */ 2535 CSR_WRITE_4(sc, JME_TXTRHD, 2536 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2537 TXTRHD_RT_PERIOD_MASK) | 2538 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2539 TXTRHD_RT_LIMIT_SHIFT)); 2540 2541 /* Disable RSS. */ 2542 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 2543 2544 /* Initialize the interrupt mask. */ 2545 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2546 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2547 2548 /* 2549 * Enabling Tx/Rx DMA engines and Rx queue processing is 2550 * done after detection of valid link in jme_miibus_statchg. 2551 */ 2552 sc->jme_flags &= ~JME_FLAG_LINK; 2553 2554 /* Set the current media. 
 */
	mii = device_get_softc(sc->jme_miibus);
	mii_mediachg(mii);

	/* Start the 1Hz MII tick. */
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Bring the interface down: disable interrupts and DMA engines and
 * free all mbufs still attached to the rings.  Called with the
 * interface serializer held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partial finished RX segments
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
2615 */ 2616 for (i = 0; i < sc->jme_rx_desc_cnt; i++) { 2617 rxd = &sc->jme_cdata.jme_rxdesc[i]; 2618 if (rxd->rx_m != NULL) { 2619 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, 2620 rxd->rx_dmamap); 2621 m_freem(rxd->rx_m); 2622 rxd->rx_m = NULL; 2623 } 2624 } 2625 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 2626 txd = &sc->jme_cdata.jme_txdesc[i]; 2627 if (txd->tx_m != NULL) { 2628 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, 2629 txd->tx_dmamap); 2630 m_freem(txd->tx_m); 2631 txd->tx_m = NULL; 2632 txd->tx_ndesc = 0; 2633 } 2634 } 2635 } 2636 2637 static void 2638 jme_stop_tx(struct jme_softc *sc) 2639 { 2640 uint32_t reg; 2641 int i; 2642 2643 reg = CSR_READ_4(sc, JME_TXCSR); 2644 if ((reg & TXCSR_TX_ENB) == 0) 2645 return; 2646 reg &= ~TXCSR_TX_ENB; 2647 CSR_WRITE_4(sc, JME_TXCSR, reg); 2648 for (i = JME_TIMEOUT; i > 0; i--) { 2649 DELAY(1); 2650 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 2651 break; 2652 } 2653 if (i == 0) 2654 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 2655 } 2656 2657 static void 2658 jme_stop_rx(struct jme_softc *sc) 2659 { 2660 uint32_t reg; 2661 int i; 2662 2663 reg = CSR_READ_4(sc, JME_RXCSR); 2664 if ((reg & RXCSR_RX_ENB) == 0) 2665 return; 2666 reg &= ~RXCSR_RX_ENB; 2667 CSR_WRITE_4(sc, JME_RXCSR, reg); 2668 for (i = JME_TIMEOUT; i > 0; i--) { 2669 DELAY(1); 2670 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 2671 break; 2672 } 2673 if (i == 0) 2674 device_printf(sc->jme_dev, "stopping recevier timeout!\n"); 2675 } 2676 2677 static void 2678 jme_init_tx_ring(struct jme_softc *sc) 2679 { 2680 struct jme_ring_data *rd; 2681 struct jme_txdesc *txd; 2682 int i; 2683 2684 sc->jme_cdata.jme_tx_prod = 0; 2685 sc->jme_cdata.jme_tx_cons = 0; 2686 sc->jme_cdata.jme_tx_cnt = 0; 2687 2688 rd = &sc->jme_rdata; 2689 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE(sc)); 2690 for (i = 0; i < sc->jme_tx_desc_cnt; i++) { 2691 txd = &sc->jme_cdata.jme_txdesc[i]; 2692 txd->tx_m = NULL; 2693 txd->tx_desc = &rd->jme_tx_ring[i]; 
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_PREWRITE);
}

/*
 * Zero the shadow status block and push it to the hardware.
 */
static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
			BUS_DMASYNC_PREWRITE);
}

/*
 * Reset the Rx ring and attach a fresh mbuf cluster to every
 * descriptor.  Returns 0 on success or an errno if buffer
 * allocation fails.
 */
static int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	/* No partially-assembled Rx frame may be pending here. */
	KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
		 sc->jme_cdata.jme_rxtail == NULL &&
		 sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE(sc));
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd, 1);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
			sc->jme_cdata.jme_rx_ring_map,
			BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Allocate and DMA-map a new mbuf cluster for Rx descriptor 'rxd',
 * then hand the descriptor back to the hardware.  'init' selects a
 * blocking mbuf allocation (ring initialization) vs. non-blocking
 * (interrupt context refill).  Returns 0 or an errno.
 */
static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load the cluster into the spare map first; swap maps on success. */
	ctx.nsegs = 1;
	ctx.segs = &segs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
				     sc->jme_cdata.jme_rx_sparemap,
				     m, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
					  sc->jme_cdata.jme_rx_sparemap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	/* Release the old cluster's mapping, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap loaded spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	/* Re-arm the descriptor and give it back to the hardware. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

/*
 * Enable or disable hardware VLAN tag stripping to match the
 * interface's IFCAP_VLAN_HWTAGGING capability setting.
 */
static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

/*
 * Program the receive filter: unicast/broadcast always on, then
 * either promiscuous/allmulti or a 64-bit multicast hash filter.
 */
static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	/* Promiscuous/allmulti: open the hash filter completely. */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		if (ifp->if_flags & IFF_ALLMULTI)
			rxcfg |= RXMAC_ALLMULTI;
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table. The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table.
 */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}

/*
 * Sysctl handler for the Tx coalescing timeout.  Validates the new
 * value against PCCTX_COAL_TO_{MIN,MAX} and reprograms the hardware
 * if the interface is running.
 */
static int
jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_to) {
		sc->jme_tx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Sysctl handler for the Tx coalescing packet count; same pattern as
 * jme_sysctl_tx_coal_to with PCCTX_COAL_PKT_{MIN,MAX} bounds.
 */
static int
jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_pkt) {
		sc->jme_tx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Sysctl handler for the Rx coalescing timeout; bounds are
 * PCCRX_COAL_TO_{MIN,MAX}.
 */
static int
jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_rx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_TO_MIN || v >
	    PCCRX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_to) {
		sc->jme_rx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Sysctl handler for the Rx coalescing packet count; bounds are
 * PCCRX_COAL_PKT_{MIN,MAX}.
 */
static int
jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_rx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_pkt) {
		sc->jme_rx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Program Tx queue 0 packet completion coalescing (timeout and
 * packet-count thresholds) from the softc into JME_PCCTX.
 */
static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}

/*
 * Program Rx queue 0 packet completion coalescing (timeout and
 * packet-count thresholds) from the softc into JME_PCCRX0.
 */
static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);
}