/*	$OpenBSD: if_jme.c,v 1.20 2009/09/13 14:42:52 krw Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>

/* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
#undef JME_SHOW_ERRORS
int	jme_match(struct device *, void *, void *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
int	jme_encap(struct jme_softc *, struct mbuf **);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_set_filter(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)

/*
 * Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return;
	} else {
		if (sc->jme_phyaddr != phy)
			return;
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptors, so the driver should reset its internal
	 * producer/consumer pointers and reclaim any allocated
	 * resources.  Note, just saving the value of the JME_TXNDA and
	 * JME_RXNDA registers before stopping the MAC and restoring
	 * them afterwards is not sufficient to guarantee correct MAC
	 * state, because stopping MAC operation can take a while and
	 * the hardware might have updated the JME_TXNDA/JME_RXNDA
	 * registers during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
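	/*
	 * Layout implied by the walk above: each EEPROM descriptor is
	 * JME_EEPROM_DESC_BYTES long and is read as a flags byte
	 * (function/page select, plus the JME_EEPROM_DESC_END marker),
	 * a register offset and a data byte.  Descriptors targeting
	 * JME_PAR0 .. JME_PAR0 + 5 supply the six station address
	 * bytes.
	 */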
	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}

void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef JME_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Save PHYADDR for FPGA mode; PHY not handled, not production hw.
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	bzero(sc->jme_rdata.jme_rx_ring, JME_RX_RING_SIZE);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;
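	/*
	 * Each ring above follows the usual four-step bus_dma(9)
	 * pattern: bus_dmamap_create() for the map, bus_dmamem_alloc()
	 * for device memory (a single segment is requested, so nsegs
	 * comes back as 1), bus_dmamem_map() for a kernel virtual
	 * mapping and bus_dmamap_load() to obtain the bus address,
	 * which is then taken from dm_segs[0].ds_addr.
	 */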
#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as we
 * have no control after powering off.  If the renegotiation fails WOL
 * may not work.  Running at 1Gbps draws more power than the 375mA at
 * 3.3V specified by the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to softc and
 * restore the same link again after resuming.  PHY handling such as
 * power down/resetting to 100Mbps may be better handled in the suspend
 * method in the phy driver.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	int maxsegs;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	if (maxsegs < (sc->jme_txd_spare - 1))
		panic("%s: not enough segments %d\n", sc->sc_dev.dv_xname,
		    maxsegs);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    *m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		if (m_defrag(*m_head, M_DONTWAIT)) {
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat,
		    txd->tx_dmamap, *m_head,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	KASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KASSERT(sc->jme_cdata.jme_tx_cnt <=
		    JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + 1;
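	/*
	 * Layout note: in 64bit address mode the head descriptor of a
	 * frame carries only the flags and, in its addr_hi word, the
	 * total frame length (buflen is 0); each DMA segment then
	 * occupies one following descriptor, which is why tx_ndesc is
	 * dm_nsegs + 1 and why JME_TD_OWN is set on the head last,
	 * only after all segment descriptors are valid.
	 */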
	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	for (;;) {
		/*
		 * Check the number of available TX descs; always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy
		 * load, so cache the TXCSR value and write the ORed
		 * value with the kick command to TXCSR.  This saves
		 * one register access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
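/*
 * Tx descriptor bookkeeping: jme_encap() consumes one head descriptor
 * plus one per DMA segment, so jme_start() only queues a frame while
 * jme_tx_cnt plus jme_txd_spare (the worst case for a single frame,
 * computed in jme_init()) stays within JME_TX_RING_CNT - JME_TXD_RSVD.
 */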
void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
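/*
 * Reprogram the MAC for the resolved speed/duplex/flow-control;
 * called from jme_miibus_statchg() once a valid link has been
 * negotiated.
 */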
void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* set clock sources for tx mac and offload engine */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under
			 * heavy load, so cache the RXCSR value and
			 * write the ORed value with the kick command
			 * to RXCSR.  This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}
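/*
 * Reclaim descriptors and mbufs for frames the chip has finished
 * transmitting; called from jme_intr(), jme_watchdog(),
 * jme_miibus_statchg() and the high-water check in jme_start().
 */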
void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!\n", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled\n",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}
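/*
 * Hand the given run of Rx descriptors back to the chip unchanged so
 * their buffers are reused, e.g. after an Rx error or an mbuf
 * allocation failure.
 */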
void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}

/* Receive a frame. */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s: receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}
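		/*
		 * Segment length fixup at the end of the frame: the
		 * first cluster holds MCLBYTES - JME_RX_PAD_BYTES of
		 * data, each middle cluster a full MCLBYTES, and the
		 * last cluster whatever remains of jme_rxlen, as
		 * computed below.
		 */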
		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding
			 * which are used to align the IP header on a
			 * 32bit boundary.  Also note the CRC bytes
			 * are automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4|JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap_ether(ifp->if_bpf, m,
				    BPF_DIRECTION_IN);
#endif

			ifp->if_ipackets++;
			/* Pass it on. */
			ether_input_mbuf(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update Rx descriptors.
		 * I'm not sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count(%d) "
			    "and packet size(%d) mismatch\n",
			    sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
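/*
 * Stop, reset and fully reprogram the chip, then mark the interface
 * running.  Called at interface-up time from jme_ioctl() and for
 * watchdog recovery.
 */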
int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES) + 1;
	KASSERT(sc->jme_txd_spare >= 2);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;

	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease the FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use the
	 * maximum allowable FIFO threshold, which is 32QW for chips
	 * with a full mask revision >= 2, otherwise 128QW.  FIFO
	 * thresholds of 64QW and 128QW are not valid for chips with a
	 * full mask revision >= 2.
	 */
	if (sc->jme_revfm >= 2)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else {
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
		else
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	}
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);
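	/*
	 * Alignment arithmetic for the 10 byte Rx pad enabled below
	 * (assuming the usual natural alignment of mbuf clusters):
	 * after jme_rxpkt() advances m_data past the pad, the Ethernet
	 * header starts at cluster offset 10 and the IP header at
	 * 10 + 14 = 24, a multiple of 4, i.e. 32bit aligned without
	 * any copying.
	 */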
	/*
	 * Pad 10 bytes right before the received frame.  This will
	 * greatly help Rx performance on strict-alignment
	 * architectures as it does not need to copy the frame to
	 * align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;
	reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read the
	 * JME_INTR_STATUS register in jme_intr.  Also it seems that
	 * it's hard to synchronize interrupt status between hardware
	 * and software with shadow posting due to requirements of
	 * bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);

	/* Configure Rx queue 0 packet completion coalescing. */
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;
	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished RX segments.
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping transmitter timeout!\n",
		    sc->sc_dev.dv_xname);
}

void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
}

void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}
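	/*
	 * The PREWRITE sync below flushes the zeroed descriptor ring
	 * out of the CPU's view (caches or bounce buffers) so the
	 * chip sees a consistent ring once DMA is enabled.
	 */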
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
	    sc->jme_cdata.jme_rxtail == NULL &&
	    sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd, 1);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature to avoid copying the entire frame just to align
	 * the IP header on a 32-bit boundary.
	 */
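	/*
	 * Concretely: with 10 bytes of padding, the 14-byte Ethernet
	 * header ends at offset 10 + 14 = 24, so the IP header starts
	 * at an offset that is a multiple of both 4 and 8 within the
	 * (aligned) cluster.
	 */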
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n",
			    sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	/* Swap in the freshly loaded spare map; the old map becomes the spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

void
jme_set_filter(struct jme_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		else
			rxcfg |= RXMAC_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/*
		 * Set up the multicast address filter by passing all
		 * multicast addresses through a CRC generator, and then
		 * using the low-order 6 bits as an index into the 64 bit
		 * multicast hash table. The high order bit selects the
		 * register, while the rest of the bits select the bit
		 * within the register. (A worked example follows this
		 * function.)
		 */
		rxcfg |= RXMAC_MULTICAST;
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 least significant bits. */
			crc &= 0x3f;

			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
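/*
 * Worked example for the hash mapping in jme_set_filter(): a CRC
 * masked to its 6 low-order bits, say 0x2f (binary 101111), yields a
 * word index of 0x2f >> 5 = 1 and a bit index of 0x2f & 0x1f = 15,
 * so that address sets bit 15 of mchash[1], i.e. register JME_MAR1.
 */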