/*	$OpenBSD: if_jme.c,v 1.25 2011/04/05 18:01:21 henning Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>

/* Define the following to disable printing Rx errors. */
#undef JME_SHOW_ERRORS

int	jme_match(struct device *, void *, void *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
int	jme_encap(struct jme_softc *, struct mbuf **);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_iff(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)

/*
 * Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
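/*
 * Note: both SMI accessors share the same command word layout.  For
 * example, reading BMSR (register 1) on PHY 1 amounts to writing
 *
 *	SMI_OP_READ | SMI_OP_EXECUTE | SMI_PHY_ADDR(1) | SMI_REG_ADDR(1)
 *
 * to JME_SMI and polling until the chip clears SMI_OP_EXECUTE; the
 * result is then taken from the SMI_DATA field of the same register.
 */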
/*
 * Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return;
	} else {
		if (sc->jme_phyaddr != phy)
			return;
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to ensure a
	 * correct MAC state, because stopping MAC operation can take
	 * a while and the hardware might have updated the
	 * JME_TXNDA/JME_RXNDA registers during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
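	/*
	 * The loop below walks the EEPROM descriptor table.  Each
	 * descriptor is three bytes: a function/page byte (fup), a
	 * register offset and a value byte.  A descriptor aimed at
	 * function 0/page BAR1 whose offset falls within
	 * JME_PAR0 ... JME_PAR0 + ETHER_ADDR_LEN - 1 contributes one
	 * byte of the station address; the walk ends at a descriptor
	 * carrying JME_EEPROM_DESC_END or once all six bytes are found.
	 */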
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}

void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access requires the use of a
	 * different BAR, it's a waste of time to use I/O register
	 * space access.  JMC250 uses 16K to map the entire memory
	 * space.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef JME_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Save PHYADDR for FPGA mode PHY not handled, not production hw
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}
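/*
 * Note: jme_dma_alloc below repeats the canonical bus_dma(9) four-step
 * sequence for each ring: bus_dmamap_create() to get a map,
 * bus_dmamem_alloc() for the raw pages, bus_dmamem_map() to make them
 * visible in kernel virtual address space, and bus_dmamap_load() to
 * obtain the physical address that is later programmed into the chip
 * (for example JME_TXDBA_HI/LO).  jme_dma_free undoes the same steps.
 */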
int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;

#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA stuffs for shadow status block
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}
	}
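	/*
	 * Note: one spare Rx dmamap is created below in addition to the
	 * per-slot maps.  A spare map is the usual bus_dma(9) way to
	 * replace an Rx buffer safely: load the fresh cluster into the
	 * spare first and swap maps only on success, so a failed
	 * allocation never leaves a ring slot without a loaded buffer.
	 */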
	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, as a gigabit link
 * will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as
 * we have no control after powering off.  If the renegotiation
 * fails, WOL may not work.  Running at 1Gbps draws more power than
 * the 375mA at 3.3V specified in the PCI specification, and that
 * would result in power to the ethernet controller being shut down
 * completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to softc
 * and restore the same link again after resuming.  PHY handling
 * such as power down/resetting to 100Mbps may be better handled in
 * a suspend method in the phy driver.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif
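/*
 * Tx descriptor layout used by jme_encap below: the first (head)
 * descriptor of a frame carries the checksum/VLAN flags and the total
 * packet length (stashed in addr_hi, with buflen 0), and is followed
 * by one 64-bit buffer descriptor per DMA segment.  Only the head
 * descriptor gets JME_TD_OWN | JME_TD_INTR, and only at the very end,
 * so the hardware cannot start on a partially built chain.
 */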
int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	int maxsegs;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	if (maxsegs < (sc->jme_txd_spare - 1))
		panic("%s: not enough segments %d", sc->sc_dev.dv_xname,
		    maxsegs);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    *m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		if (m_defrag(*m_head, M_DONTWAIT)) {
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat,
		    txd->tx_dmamap, *m_head,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	KASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KASSERT(sc->jme_cdata.jme_tx_cnt <=
		    JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	for (;;) {
		/*
		 * Check the number of available TX descs; always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to the TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
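/*
 * Watchdog recovery strategy: a timeout with no link is treated as a
 * real error and the chip is reinitialized; a timeout where the Tx
 * ring turns out to be empty means only a completion interrupt was
 * missed, so transmission is simply restarted; anything else gets a
 * full jme_init.
 */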
void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* Set clock sources for tx mac and offload engine. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under heavy
			 * load, so cache the RXCSR value and write the ORed
			 * value with the kick command to the RXCSR.  This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}

void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chained buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}
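/*
 * Multi-segment Rx length bookkeeping used by jme_rxpkt below, for a
 * frame spread over nsegs clusters: the first mbuf holds
 * MCLBYTES - JME_RX_PAD_BYTES, the nsegs - 2 middle mbufs hold
 * MCLBYTES each, and the last mbuf holds whatever remains of
 * jme_rxlen.  The 10-byte pad the chip prepends (see RXMAC_PAD_10BYTES
 * in jme_init) is skipped by advancing m_data on the completed chain.
 */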
/* Receive a frame. */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s : receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed when we encounter the end
		 * of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding
			 * which are used to align the IP header on a
			 * 32bit boundary.  Also note, the CRC bytes
			 * are automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4 | JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK |
					    M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap_ether(ifp->if_bpf, m,
				    BPF_DIRECTION_IN);
#endif

			ifp->if_ipackets++;
			/* Pass it on. */
			ether_input_mbuf(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update the Rx
		 * descriptors.  I'm not sure whether this check is
		 * needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count(%d) "
			    "and packet size(%d) mismatch\n",
			    sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header),
	    MCLBYTES) + 1;
	KASSERT(sc->jme_txd_spare >= 2);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);
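	/*
	 * The PAR registers take the station address in little-endian
	 * byte order; as an illustrative example, 00:11:22:33:44:55 is
	 * written as PAR0 = 0x33221100 and PAR1 = 0x00005544, matching
	 * the way jme_reg_macaddr reads the address back.
	 */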
	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;

	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease the FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames, use the
	 * maximum allowable FIFO threshold, which is 32QW for chips
	 * with a full mask >= 2, otherwise 128QW.  FIFO thresholds of
	 * 64QW and 128QW are not valid for chips with a full
	 * mask >= 2.
	 */
	if (sc->jme_revfm >= 2)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else {
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
		else
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	}
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_iff(sc);

	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame.  This will
	 * greatly help Rx performance on strict-alignment
	 * architectures as it does not need to copy the frame to
	 * align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;
	reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
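	/*
	 * The PCC (packet completion coalescing) setup that follows
	 * bounds the interrupt rate in both directions: with the PCC
	 * unit set to microseconds in GPREG0, a queue raises its
	 * completion interrupt once either the packet count or the
	 * timeout programmed into JME_PCCTX/JME_PCCRX0 is reached,
	 * whichever happens first (coalescing semantics assumed here).
	 */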
	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read the
	 * JME_INTR_STATUS register in jme_intr.  Also it seems that
	 * it's hard to synchronize interrupt status between hardware
	 * and software with shadow posting due to requirements of
	 * bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);

	/* Configure Rx queue 0 packet completion coalescing. */
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished Rx segments.
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free Rx and Tx mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping transmitter timeout!\n",
		    sc->sc_dev.dv_xname);
}

void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
}

void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}
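	/*
	 * Flush the cleared descriptors out to memory before the
	 * hardware is told about the ring; without the PREWRITE
	 * sync the chip could fetch stale descriptor contents.
	 */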
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
	    sc->jme_cdata.jme_rxtail == NULL &&
	    sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd, 1);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
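	/*
	 * Note that "init" selects the allocation policy above: when
	 * filling the ring at initialization time it is safe to sleep
	 * for an mbuf cluster, but when replenishing a descriptor from
	 * the Rx path the allocation must fail rather than sleep.
	 */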
	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature in order not to copy the entire frame just to align
	 * the IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n",
			    sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	/* Hand the descriptor back to the hardware. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

void
jme_iff(struct jme_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		else
			rxcfg |= RXMAC_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/*
		 * Set up the multicast address filter by passing all
		 * multicast addresses through a CRC generator, and then
		 * using the low-order 6 bits as an index into the 64 bit
		 * multicast hash table. The high-order bit selects the
		 * register, while the low-order five bits select the bit
		 * within the register.
		 */
		rxcfg |= RXMAC_MULTICAST;
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 least significant bits. */
			crc &= 0x3f;

			/*
			 * Set the corresponding bit in the hash table.
			 * For example, an index of 0x25 (37) sets bit 5
			 * of the upper register, MAR1.
			 */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}