/*	$OpenBSD: if_jme.c,v 1.18 2009/03/29 21:53:52 sthen Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>

/* Define JME_SHOW_ERRORS to enable printing of Rx error messages. */
#undef JME_SHOW_ERRORS

int	jme_match(struct device *, void *, void *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
int	jme_encap(struct jme_softc *, struct mbuf **);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_set_filter(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)

/*
 * Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
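/*
 * Editorial note on the SMI access pattern used by the two helpers
 * here (a reading of this code, not of vendor documentation): the
 * driver writes the opcode, PHY and register fields together with
 * SMI_OP_EXECUTE, then busy-waits until the hardware clears
 * SMI_OP_EXECUTE, roughly:
 *
 *	CSR_WRITE_4(sc, JME_SMI, op | SMI_OP_EXECUTE | ...);
 *	while (CSR_READ_4(sc, JME_SMI) & SMI_OP_EXECUTE)
 *		DELAY(1);
 *
 * The wait is bounded by JME_PHY_TIMEOUT iterations so a wedged PHY
 * cannot hang the kernel; on timeout a read simply returns 0.
 */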
/*
 * Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return;
	} else {
		if (sc->jme_phyaddr != phy)
			return;
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver should reset its
	 * internal producer/consumer pointers and reclaim any allocated
	 * resources.  Note, just saving the values of the JME_TXNDA and
	 * JME_RXNDA registers before stopping the MAC and restoring
	 * them afterwards is not sufficient to guarantee correct MAC
	 * state, because stopping MAC operation can take a while and
	 * the hardware might have updated the JME_TXNDA/JME_RXNDA
	 * registers during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
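/*
 * Editorial sketch of the EEPROM layout as implied by the parsing loop
 * above (derived from this code, not from vendor documentation): after
 * the two signature bytes, the EEPROM holds a list of
 * JME_EEPROM_DESC_BYTES-sized records of the form
 *
 *	byte 0: fup - function/page selector, plus JME_EEPROM_DESC_END
 *	byte 1: reg - register offset (JME_PAR0 .. JME_PAR0 + 5)
 *	byte 2: val - the byte to store at that register
 *
 * so the MAC address is scattered over up to six records, and `match'
 * counts how many of the six PAR bytes have been recovered.
 */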
void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}

void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory.
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && jmedebug) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA resources. */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef JME_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Saving PHYADDR for FPGA-mode PHYs is not handled here; that
	 * is not production hardware.
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}
int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA resources for the TX ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->jme_rdata.jme_tx_ring_seg, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the RX ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	bzero(sc->jme_rdata.jme_rx_ring, JME_RX_RING_SIZE);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->jme_rdata.jme_rx_ring_seg, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;
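	/*
	 * Editorial sketch: every DMA object in this driver follows the
	 * same five-step bus_dma(9) recipe seen above,
	 *
	 *	bus_dmamap_create(tag, size, 1, size, 0, flags, &map);
	 *	bus_dmamem_alloc(tag, size, align, 0, &seg, 1, &nsegs, ...);
	 *	bus_dmamem_map(tag, &seg, nsegs, size, &kva, ...);
	 *	bus_dmamap_load(tag, map, kva, size, NULL, ...);
	 *	paddr = map->dm_segs[0].ds_addr;
	 *
	 * with failure at a later step ideally unwinding the earlier
	 * ones; the error paths above only do this partially.
	 */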
#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA resources for the shadow status block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->jme_rdata.jme_ssb_block_seg, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for TX buffers.
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}
	}

	/*
	 * Create DMA resources for RX buffers.
	 */

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->jme_rdata.jme_tx_ring_seg, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->jme_rdata.jme_rx_ring_seg, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->jme_rdata.jme_ssb_block_seg, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed, as we have
 * no control after powering off.  If the renegotiation fails, WOL may
 * not work.  Running at 1Gbps draws more power than the 375mA at 3.3V
 * specified in the PCI specification, and that would result in
 * completely shutting down power to the ethernet controller.
 *
 * TODO
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * a suspend method in the phy driver.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link; force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	int maxsegs;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	if (maxsegs < (sc->jme_txd_spare - 1))
		panic("%s: not enough segments %d\n", sc->sc_dev.dv_xname,
		    maxsegs);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    *m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		error = 0;

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		M_DUP_PKTHDR(m, *m_head);
		if ((*m_head)->m_pkthdr.len > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(*m_head);
				m_freem(m);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}

		m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len,
		    mtod(m, caddr_t));
		m_freem(*m_head);
		m->m_len = m->m_pkthdr.len;
		*m_head = m;

		error = bus_dmamap_load_mbuf(sc->sc_dmat,
		    txd->tx_dmamap, *m_head,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	KASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KASSERT(sc->jme_cdata.jme_tx_cnt <=
		    JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}
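/*
 * Editorial sketch of the descriptor chain jme_encap() builds for an
 * N-segment dmamap (field names as used above):
 *
 *	ring[prod+0]: flags = csum/VLAN bits, addr_hi = total pkt len
 *	ring[prod+1 .. prod+N]: JME_TD_OWN | JME_TD_64BIT, one DMA
 *	    segment (buflen/addr_hi/addr_lo) each
 *
 * Only after the whole chain is written does the first descriptor
 * receive JME_TD_OWN | JME_TD_INTR, so the hardware never sees a
 * half-built chain; tx_ndesc = dm_nsegs + 1 records the whole group
 * for jme_txeof().
 */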
void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	for (;;) {
		/*
		 * Check the number of available TX descs; always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the OR'd value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
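/*
 * Editorial note on Tx ring accounting: jme_start() only queues a
 * packet when jme_tx_cnt + jme_txd_spare descriptors fit below
 * JME_TX_RING_CNT - JME_TXD_RSVD.  jme_txd_spare is sized in
 * jme_init() as howmany(mtu, MCLBYTES) + 1 (the extra dummy
 * descriptor needed by 64bit address mode), so once the check passes
 * a worst-case packet can always be encapsulated.
 */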
void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* Set clock sources for tx mac and offload engine. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under heavy
			 * load, so cache the RXCSR value and write the OR'd
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}
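/*
 * Editorial note on the interrupt dance above: the handler masks all
 * sources via JME_INTR_MASK_CLR, folds the COAL/COAL_TO causes into
 * the matching COMP bits before acking JME_INTR_STATUS (which also
 * resets the PCC counters/timers), services Rx then Tx, and only then
 * re-enables the sources through JME_INTR_MASK_SET.
 */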
void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!\n", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only in the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled\n",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}

/* Receive a frame. */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s : receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding which
			 * are used to align the IP header on a 32bit
			 * boundary.  Also note, the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & JME_RD_IPV4) {
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK |
					    M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap_ether(ifp->if_bpf, m,
				    BPF_DIRECTION_IN);
#endif

			ifp->if_ipackets++;
			/* Pass it on. */
			ether_input_mbuf(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
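/*
 * Worked example of the segment-length math above, assuming
 * MCLBYTES = 2048 and JME_RX_PAD_BYTES = 10 (check the headers for
 * the real values): a frame with JME_RX_BYTES(status) = 5000 gives
 * nsegs = howmany(5000, 2048) = 3 and jme_rxlen = 4990; the chain
 * then carries 2048 - 10 = 2038 bytes in the first mbuf, 2048 in the
 * second and 4990 - (2038 + 2048) = 904 in the last.
 */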
void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against received bytes.
		 * A non-matching value would indicate that the hardware
		 * is still trying to update Rx descriptors.  I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count (%d) "
			    "and packet size (%d) mismatch\n",
			    sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header),
	    MCLBYTES) + 1;
	KASSERT(sc->jme_txd_spare >= 2);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
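	/*
	 * Editorial note on the byte order above: PAR0 holds
	 * eaddr[0..3] with eaddr[0] in the least significant byte and
	 * PAR1 holds eaddr[4..5], the mirror image of how
	 * jme_reg_macaddr() reads the address back out of the chip.
	 */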
	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;

	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease the FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use the
	 * maximum allowable FIFO threshold, which is 32QW for chips
	 * with a full mask >= 2, otherwise 128QW.  FIFO thresholds of
	 * 64QW and 128QW are not valid for chips with a full mask >= 2.
	 */
	if (sc->jme_revfm >= 2)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else {
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
		else
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	}
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame.  This greatly
	 * helps Rx performance on strict-alignment architectures, as
	 * the driver does not need to copy the frame to align the
	 * payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;
	reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
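	/*
	 * Editorial note on the 10 byte pad: the 14-byte ethernet
	 * header plus 10 bytes of pad puts the IP header at offset 24,
	 * a multiple of 4, so strict-alignment CPUs can load 32bit IP
	 * fields directly; jme_rxpkt() skips the pad again with
	 * m->m_data += JME_RX_PAD_BYTES.
	 */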
	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read
	 * the JME_INTR_STATUS register in jme_intr.  Also it seems
	 * that it's hard to synchronize interrupt status between
	 * hardware and software with shadow posting due to
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);

	/* Configure Rx queue 0 packet completion coalescing. */
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}
	/* Configure the shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating of the shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished Rx segments.
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free Rx and Tx mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping transmitter timeout!\n",
		    sc->sc_dev.dv_xname);
}

void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping receiver timeout!\n",
		    sc->sc_dev.dv_xname);
}
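/*
 * Ring setup convention (editor's summary of the code below, not of
 * the datasheet): the Tx ring is handed to the chip empty, so every
 * descriptor is zeroed and owned by the driver, while each Rx
 * descriptor is populated up front by jme_newbuf() and marked
 * JME_RD_OWN so the chip may DMA into it as soon as the receiver is
 * enabled.
 */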
void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
	    sc->jme_cdata.jme_rxtail == NULL &&
	    sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd, 1);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}
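/*
 * Buffer replacement strategy (editor's note, describing the code
 * below): jme_newbuf() loads the new mbuf into the spare DMA map
 * first and swaps maps only once the load has succeeded, so on
 * failure the old mbuf and its mapping are left intact and the ring
 * never loses a buffer.  This is a common pattern in OpenBSD network
 * drivers.
 */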
int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the 10-byte padding feature of
	 * the hardware in order not to copy the entire frame to
	 * align the IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n",
			    sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

void
jme_set_filter(struct jme_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		else
			rxcfg |= RXMAC_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/*
		 * Set up the multicast address filter by passing all
		 * multicast addresses through a CRC generator, and then
		 * using the low-order 6 bits as an index into the 64-bit
		 * multicast hash table.  The high-order bit selects the
		 * register, while the remaining bits select the bit
		 * within the register (a worked example follows this
		 * function).
		 */
		rxcfg |= RXMAC_MULTICAST;
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 6 least significant bits. */
			crc &= 0x3f;

			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
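/*
 * Hash walk-through (editor's note with illustrative values, not
 * computed from a real address): if ether_crc32_be() returns a CRC
 * whose low six bits are 0x2b (43), then crc >> 5 = 1 and
 * crc & 0x1f = 11, so bit 11 of mchash[1] is set and matching frames
 * are accepted via the JME_MAR1 register.
 */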