/*	$OpenBSD: if_jme.c,v 1.27 2012/02/28 03:58:16 jsg Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>

/* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
#undef JME_SHOW_ERRORS

int	jme_match(struct device *, void *, void *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
int	jme_encap(struct jme_softc *, struct mbuf **);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_iff(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
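
/*
 * PHY access goes through the chip's SMI interface: write the opcode,
 * PHY address and register number to JME_SMI, poll until the hardware
 * clears SMI_OP_EXECUTE, then read or write the data in the SMI_DATA
 * field of the same register.  The two helpers below implement that
 * handshake for the MII layer.
 */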
/*
 * Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return;
	} else {
		if (sc->jme_phyaddr != phy)
			return;
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}
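
/*
 * Note: a link change is handled as a mini re-init.  Stopping the
 * Rx/Tx MACs resets the hardware descriptor pointers (see the comment
 * in jme_miibus_statchg() below), so the handler drains both rings,
 * rebuilds its software state and only then re-enables the MACs.
 * jme_start() refuses to queue packets until JME_FLAG_LINK is set
 * here.
 */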
/*
 * Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA register to the first address of the
	 * Tx/Rx descriptor address.  So the driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
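
/*
 * The EEPROM is a list of three-byte descriptors after a two-byte
 * signature: a function/page byte (fup), a register offset and a data
 * byte.  Descriptors whose offset falls within JME_PAR0 ...
 * JME_PAR0 + ETHER_ADDR_LEN - 1 each contribute one byte of the
 * station address; the walk stops at JME_EEPROM_DESC_END or once all
 * six bytes have been collected.
 */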
int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}
void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef JME_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Save PHYADDR for FPGA mode; PHY not handled, not production hw.
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}
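
/*
 * Every DMA area below goes through the same bus_dma(9) sequence:
 * bus_dmamap_create() for the map, bus_dmamem_alloc() plus
 * bus_dmamem_map() for the backing memory, and bus_dmamap_load() to
 * obtain the physical address that is later programmed into the chip.
 */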
int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;

#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif
	/*
	 * Create DMA stuffs for shadow status block
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}

	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create DMA maps for Rx buffers. */
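	/*
	 * A spare Rx map is kept in addition to the per-slot maps so
	 * that jme_newbuf() can load a fresh mbuf into the spare first
	 * and only swap it into the ring slot once the load succeeded;
	 * on failure the slot keeps its old, still-mapped buffer.
	 */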
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation
 * but we don't know whether that operation will succeed or not as we
 * have no control after powering off.  If the renegotiation fails WOL
 * may not work.  Running at 1Gbps draws more power than the 375mA at
 * 3.3V specified in the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to softc
 * and restore the same link again after resuming.  PHY handling such
 * as power down/resetting to 100Mbps may be better handled in a
 * suspend method in the phy driver.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif
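
/*
 * Transmit encapsulation: the first descriptor of a frame carries the
 * checksum/VLAN flags and the total frame length (stashed in addr_hi),
 * then one descriptor per DMA segment follows with JME_TD_OWN set.
 * Since 64bit addressing is always used, this head descriptor is the
 * extra "dummy" slot accounted for in sc->jme_txd_spare (see
 * jme_init()).
 */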
int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	int maxsegs;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	if (maxsegs < (sc->jme_txd_spare - 1))
		panic("%s: not enough segments %d", sc->sc_dev.dv_xname,
		    maxsegs);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    *m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		if (m_defrag(*m_head, M_DONTWAIT)) {
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat,
		    txd->tx_dmamap, *m_head,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    sc->sc_dev.dv_xname);
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	KASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KASSERT(sc->jme_cdata.jme_tx_cnt <=
		    JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give ownership of the first
	 * descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}
void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	for (;;) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to the TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
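
/*
 * The watchdog distinguishes three cases: a lost link (full reinit),
 * Tx completions that were processed but never raised an interrupt
 * (reclaim and restart the queue), and a genuine Tx hang (reinit and
 * restart).
 */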
void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);
	jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}
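
	/*
	 * Note that pause/flow-control negotiation is not wired up yet
	 * (the #ifdef notyet block above), so RXMAC_FC_ENB and
	 * TXPFC_PAUSE_ENB stay cleared.  GPREG1_HALF_PATCH below is the
	 * JME_WA_PACKETLOSS workaround applied for 10/100 half-duplex
	 * operation.
	 */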
	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* set clock sources for tx mac and offload engine */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under heavy
			 * load so cache the RXCSR value and write the ORed
			 * value with the kick command to the RXCSR.  This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}
void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip over
		 * the entire chain of buffers for the transmitted frame.
		 * In other words, the JME_TD_OWN bit is valid only at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}

/* Receive a frame. */
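/*
 * A frame that spans several clusters is reassembled into an mbuf
 * chain: the first mbuf holds MCLBYTES - JME_RX_PAD_BYTES of data,
 * the middle ones MCLBYTES each, and the last one whatever remains of
 * jme_rxlen, i.e.
 *	rxlen - ((MCLBYTES - JME_RX_PAD_BYTES) + MCLBYTES * (nsegs - 2)).
 */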
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s: receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding which
			 * is used to align the IP header on a 32bit
			 * boundary.  Also note, the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4|JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap_ether(ifp->if_bpf, m,
				    BPF_DIRECTION_IN);
#endif

			ifp->if_ipackets++;
			/* Pass it on. */
			ether_input_mbuf(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors.  I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count(%d) "
			    "and packet size(%d) mismatch\n",
			    sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
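
/*
 * Bring the chip up: stop and reset, rebuild the rings, program the
 * station address and the Tx/Rx CSRs, then the coalescing and
 * interrupt masks.  The Tx/Rx DMA engines themselves are only enabled
 * once jme_miibus_statchg() sees a valid link.  With the standard
 * 1500 byte MTU and 2K clusters, jme_txd_spare works out to
 * howmany(1500 + 18, MCLBYTES) + 1 = 2 descriptors.
 */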
int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header),
	    MCLBYTES) + 1;
	KASSERT(sc->jme_txd_spare >= 2);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;

	/*
	 * Since Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use
	 * maximum allowable FIFO threshold, which is 32QW for
	 * chips with a full mask >= 2 otherwise 128QW.  FIFO
	 * thresholds of 64QW and 128QW are not valid for chips
	 * with a full mask >= 2.
	 */
	if (sc->jme_revfm >= 2)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else {
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
		else
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	}
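	/*
	 * With the default 1500 byte MTU the largest frame is
	 * 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes,
	 * well under JME_RX_FIFO_SIZE, so pre-revision-2 chips end up
	 * with the 128QW threshold here.
	 */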
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_iff(sc);

	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame.  This will
	 * greatly help Rx performance on strict-alignment
	 * architectures as it does not need to copy the frame to
	 * align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;
	reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read
	 * JME_INTR_STATUS register in jme_intr.  Also it seems
	 * that it's hard to synchronize interrupt status between
	 * hardware and software with shadow posting due to
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);

	/* Configure Rx queue 0 packet completion coalescing. */
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating of the shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished RX segments.
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping transmitter timeout!\n",
		    sc->sc_dev.dv_xname);
}

void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
}
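
/*
 * Both stop routines above share the same disable-and-poll idiom.
 * A minimal sketch of a helper they could share (hypothetical, not
 * part of this driver; "csr" and "enb" stand for either
 * JME_TXCSR/TXCSR_TX_ENB or JME_RXCSR/RXCSR_RX_ENB):
 *
 *	static int
 *	jme_stop_engine(struct jme_softc *sc, bus_size_t csr,
 *	    uint32_t enb)
 *	{
 *		int i;
 *
 *		CSR_WRITE_4(sc, csr, CSR_READ_4(sc, csr) & ~enb);
 *		for (i = JME_TIMEOUT; i > 0; i--) {
 *			DELAY(1);
 *			if ((CSR_READ_4(sc, csr) & enb) == 0)
 *				return (0);
 *		}
 *		return (ETIMEDOUT);
 *	}
 */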
void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
	    sc->jme_cdata.jme_rxtail == NULL &&
	    sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature in order not to copy the entire frame just to align
	 * the IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map so the old mbuf survives a failure. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	/* Swap the just-loaded spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}
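
/*
 * Why the 10-byte pad pays off, as illustrative arithmetic rather
 * than driver code: the hardware inserts RXMAC_PAD_10BYTES of
 * padding in front of each received frame, so the IP header that
 * follows the 14-byte Ethernet header starts at offset
 * 10 + 14 = 24, which is both 4- and 8-byte aligned (assuming the
 * buffer itself starts 8-byte aligned).  Without the pad it would
 * start at offset 14 and strict-alignment architectures would have
 * to copy every frame.
 */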
void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

void
jme_iff(struct jme_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		else
			rxcfg |= RXMAC_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/*
		 * Set up the multicast address filter by passing all
		 * multicast addresses through a CRC generator, and then
		 * using the low-order 6 bits as an index into the 64 bit
		 * multicast hash table.  The high order bits select the
		 * register, while the rest of the bits select the bit
		 * within the register.
		 */
		rxcfg |= RXMAC_MULTICAST;
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 least significant bits. */
			crc &= 0x3f;

			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
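
/*
 * A worked example of the hash indexing in jme_iff(), using a
 * hypothetical CRC value: if ether_crc32_be() yields a CRC whose
 * low 6 bits are 0x27, then crc >> 5 == 1 and crc & 0x1f == 7, so
 * bit 7 of mchash[1] (register JME_MAR1) is set for that multicast
 * address.
 */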