1 /* $OpenBSD: if_jme.c,v 1.47 2016/04/13 10:34:32 mpi Exp $ */ 2 /*- 3 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $ 29 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $ 30 */ 31 32 #include "bpfilter.h" 33 #include "vlan.h" 34 35 #include <sys/param.h> 36 #include <sys/endian.h> 37 #include <sys/systm.h> 38 #include <sys/types.h> 39 #include <sys/sockio.h> 40 #include <sys/mbuf.h> 41 #include <sys/queue.h> 42 #include <sys/kernel.h> 43 #include <sys/device.h> 44 #include <sys/timeout.h> 45 #include <sys/socket.h> 46 47 #include <machine/bus.h> 48 49 #include <net/if.h> 50 #include <net/if_dl.h> 51 #include <net/if_media.h> 52 53 #include <netinet/in.h> 54 #include <netinet/if_ether.h> 55 56 #if NBPFILTER > 0 57 #include <net/bpf.h> 58 #endif 59 60 #include <dev/mii/miivar.h> 61 #include <dev/mii/jmphyreg.h> 62 63 #include <dev/pci/pcireg.h> 64 #include <dev/pci/pcivar.h> 65 #include <dev/pci/pcidevs.h> 66 67 #include <dev/pci/if_jmereg.h> 68 #include <dev/pci/if_jmevar.h> 69 70 /* Define the following to enable printing of Rx errors.
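 * (JME_SHOW_ERRORS guards a per-frame error printf in jme_rxpkt(); change the #undef below to a #define to see those messages.)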
*/ 71 #undef JME_SHOW_ERRORS 72 73 int jme_match(struct device *, void *, void *); 74 void jme_map_intr_vector(struct jme_softc *); 75 void jme_attach(struct device *, struct device *, void *); 76 int jme_detach(struct device *, int); 77 78 int jme_miibus_readreg(struct device *, int, int); 79 void jme_miibus_writereg(struct device *, int, int, int); 80 void jme_miibus_statchg(struct device *); 81 82 int jme_init(struct ifnet *); 83 int jme_ioctl(struct ifnet *, u_long, caddr_t); 84 85 void jme_start(struct ifnet *); 86 void jme_watchdog(struct ifnet *); 87 void jme_mediastatus(struct ifnet *, struct ifmediareq *); 88 int jme_mediachange(struct ifnet *); 89 90 int jme_intr(void *); 91 void jme_txeof(struct jme_softc *); 92 void jme_rxeof(struct jme_softc *); 93 94 int jme_dma_alloc(struct jme_softc *); 95 void jme_dma_free(struct jme_softc *); 96 int jme_init_rx_ring(struct jme_softc *); 97 void jme_init_tx_ring(struct jme_softc *); 98 void jme_init_ssb(struct jme_softc *); 99 int jme_newbuf(struct jme_softc *, struct jme_rxdesc *); 100 int jme_encap(struct jme_softc *, struct mbuf *); 101 void jme_rxpkt(struct jme_softc *); 102 103 void jme_tick(void *); 104 void jme_stop(struct jme_softc *); 105 void jme_reset(struct jme_softc *); 106 void jme_set_vlan(struct jme_softc *); 107 void jme_iff(struct jme_softc *); 108 void jme_stop_tx(struct jme_softc *); 109 void jme_stop_rx(struct jme_softc *); 110 void jme_mac_config(struct jme_softc *); 111 void jme_reg_macaddr(struct jme_softc *, uint8_t[]); 112 int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]); 113 int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 114 void jme_discard_rxbufs(struct jme_softc *, int, int); 115 #ifdef notyet 116 void jme_setwol(struct jme_softc *); 117 void jme_setlinkspeed(struct jme_softc *); 118 #endif 119 120 /* 121 * Devices supported by this driver. 122 */ 123 const struct pci_matchid jme_devices[] = { 124 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 }, 125 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 } 126 }; 127 128 struct cfattach jme_ca = { 129 sizeof (struct jme_softc), jme_match, jme_attach 130 }; 131 132 struct cfdriver jme_cd = { 133 NULL, "jme", DV_IFNET 134 }; 135 136 int jmedebug = 0; 137 #define DPRINTF(x) do { if (jmedebug) printf x; } while (0) 138 139 /* 140 * Read a PHY register on the MII of the JMC250. 141 */ 142 int 143 jme_miibus_readreg(struct device *dev, int phy, int reg) 144 { 145 struct jme_softc *sc = (struct jme_softc *)dev; 146 uint32_t val; 147 int i; 148 149 /* For FPGA version, PHY address 0 should be ignored. */ 150 if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0) 151 return (0); 152 153 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE | 154 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 155 156 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 157 DELAY(1); 158 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 159 break; 160 } 161 if (i == 0) { 162 printf("%s: phy read timeout: phy %d, reg %d\n", 163 sc->sc_dev.dv_xname, phy, reg); 164 return (0); 165 } 166 167 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); 168 } 169 170 /* 171 * Write a PHY register on the MII of the JMC250. 172 */ 173 void 174 jme_miibus_writereg(struct device *dev, int phy, int reg, int val) 175 { 176 struct jme_softc *sc = (struct jme_softc *)dev; 177 int i; 178 179 /* For FPGA version, PHY address 0 should be ignored. 
*/ 180 if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0) 181 return; 182 183 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE | 184 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 185 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 186 187 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 188 DELAY(1); 189 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 190 break; 191 } 192 if (i == 0) { 193 printf("%s: phy write timeout: phy %d, reg %d\n", 194 sc->sc_dev.dv_xname, phy, reg); 195 } 196 } 197 198 /* 199 * Callback from MII layer when media changes. 200 */ 201 void 202 jme_miibus_statchg(struct device *dev) 203 { 204 struct jme_softc *sc = (struct jme_softc *)dev; 205 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 206 struct mii_data *mii; 207 struct jme_txdesc *txd; 208 bus_addr_t paddr; 209 int i; 210 211 if ((ifp->if_flags & IFF_RUNNING) == 0) 212 return; 213 214 mii = &sc->sc_miibus; 215 216 sc->jme_flags &= ~JME_FLAG_LINK; 217 if ((mii->mii_media_status & IFM_AVALID) != 0) { 218 switch (IFM_SUBTYPE(mii->mii_media_active)) { 219 case IFM_10_T: 220 case IFM_100_TX: 221 sc->jme_flags |= JME_FLAG_LINK; 222 break; 223 case IFM_1000_T: 224 if (sc->jme_caps & JME_CAP_FASTETH) 225 break; 226 sc->jme_flags |= JME_FLAG_LINK; 227 break; 228 default: 229 break; 230 } 231 } 232 233 /* 234 * Disabling the Rx/Tx MACs has the side effect of resetting 235 * the JME_TXNDA/JME_RXNDA registers to the first address of 236 * the Tx/Rx descriptor rings. So the driver should reset its 237 * internal producer/consumer pointers and reclaim any 238 * allocated resources. Note that just saving the values of 239 * the JME_TXNDA and JME_RXNDA registers before stopping the MAC 240 * and restoring them afterwards is not 241 * sufficient to guarantee a correct MAC state, because 242 * stopping the MAC can take a while and the hardware 243 * might have updated the JME_TXNDA/JME_RXNDA registers 244 * during the stop operation. 245 */ 246 247 /* Disable interrupts */ 248 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 249 250 /* Stop driver */ 251 ifp->if_flags &= ~IFF_RUNNING; 252 ifq_clr_oactive(&ifp->if_snd); 253 ifp->if_timer = 0; 254 timeout_del(&sc->jme_tick_ch); 255 256 /* Stop receiver/transmitter. */ 257 jme_stop_rx(sc); 258 jme_stop_tx(sc); 259 260 jme_rxeof(sc); 261 if (sc->jme_cdata.jme_rxhead != NULL) 262 m_freem(sc->jme_cdata.jme_rxhead); 263 JME_RXCHAIN_RESET(sc); 264 265 jme_txeof(sc); 266 if (sc->jme_cdata.jme_tx_cnt != 0) { 267 /* Remove queued packets for transmit. */ 268 for (i = 0; i < JME_TX_RING_CNT; i++) { 269 txd = &sc->jme_cdata.jme_txdesc[i]; 270 if (txd->tx_m != NULL) { 271 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 272 m_freem(txd->tx_m); 273 txd->tx_m = NULL; 274 txd->tx_ndesc = 0; 275 ifp->if_oerrors++; 276 } 277 } 278 } 279 280 /* 281 * Reuse configured Rx descriptors and reset the 282 * producer/consumer index. 283 */ 284 sc->jme_cdata.jme_rx_cons = 0; 285 286 jme_init_tx_ring(sc); 287 288 /* Initialize shadow status block. */ 289 jme_init_ssb(sc); 290 291 /* Program MAC with resolved speed/duplex/flow-control. */ 292 if (sc->jme_flags & JME_FLAG_LINK) { 293 jme_mac_config(sc); 294 295 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr); 296 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 297 298 /* Set Tx ring address to the hardware. */ 299 paddr = JME_TX_RING_ADDR(sc, 0); 300 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 301 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 302 303 /* Set Rx ring address to the hardware.
*/ 304 paddr = JME_RX_RING_ADDR(sc, 0); 305 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 306 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 307 308 /* Restart receiver/transmitter. */ 309 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 310 RXCSR_RXQ_START); 311 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB); 312 } 313 314 ifp->if_flags |= IFF_RUNNING; 315 ifq_clr_oactive(&ifp->if_snd); 316 timeout_add_sec(&sc->jme_tick_ch, 1); 317 318 /* Reenable interrupts. */ 319 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 320 } 321 322 /* 323 * Get the current interface media status. 324 */ 325 void 326 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 327 { 328 struct jme_softc *sc = ifp->if_softc; 329 struct mii_data *mii = &sc->sc_miibus; 330 331 mii_pollstat(mii); 332 ifmr->ifm_status = mii->mii_media_status; 333 ifmr->ifm_active = mii->mii_media_active; 334 } 335 336 /* 337 * Set hardware to newly-selected media. 338 */ 339 int 340 jme_mediachange(struct ifnet *ifp) 341 { 342 struct jme_softc *sc = ifp->if_softc; 343 struct mii_data *mii = &sc->sc_miibus; 344 int error; 345 346 if (mii->mii_instance != 0) { 347 struct mii_softc *miisc; 348 349 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 350 mii_phy_reset(miisc); 351 } 352 error = mii_mediachg(mii); 353 354 return (error); 355 } 356 357 int 358 jme_match(struct device *dev, void *match, void *aux) 359 { 360 return pci_matchbyid((struct pci_attach_args *)aux, jme_devices, 361 sizeof (jme_devices) / sizeof (jme_devices[0])); 362 } 363 364 int 365 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) 366 { 367 uint32_t reg; 368 int i; 369 370 *val = 0; 371 for (i = JME_TIMEOUT; i > 0; i--) { 372 reg = CSR_READ_4(sc, JME_SMBCSR); 373 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) 374 break; 375 DELAY(1); 376 } 377 378 if (i == 0) { 379 printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname); 380 return (ETIMEDOUT); 381 } 382 383 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; 384 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); 385 for (i = JME_TIMEOUT; i > 0; i--) { 386 DELAY(1); 387 reg = CSR_READ_4(sc, JME_SMBINTF); 388 if ((reg & SMBINTF_CMD_TRIGGER) == 0) 389 break; 390 } 391 392 if (i == 0) { 393 printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname); 394 return (ETIMEDOUT); 395 } 396 397 reg = CSR_READ_4(sc, JME_SMBINTF); 398 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; 399 400 return (0); 401 } 402 403 int 404 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 405 { 406 uint8_t fup, reg, val; 407 uint32_t offset; 408 int match; 409 410 offset = 0; 411 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 412 fup != JME_EEPROM_SIG0) 413 return (ENOENT); 414 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || 415 fup != JME_EEPROM_SIG1) 416 return (ENOENT); 417 match = 0; 418 do { 419 if (jme_eeprom_read_byte(sc, offset, &fup) != 0) 420 break; 421 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) == 422 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) { 423 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0) 424 break; 425 if (reg >= JME_PAR0 && 426 reg < JME_PAR0 + ETHER_ADDR_LEN) { 427 if (jme_eeprom_read_byte(sc, offset + 2, 428 &val) != 0) 429 break; 430 eaddr[reg - JME_PAR0] = val; 431 match++; 432 } 433 } 434 /* Check for the end of EEPROM descriptor. */ 435 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END) 436 break; 437 /* Try the next EEPROM descriptor.
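 * Each descriptor occupies JME_EEPROM_DESC_BYTES: a flag/usage byte (matched against the FUNC/PAGE masks and checked for JME_EEPROM_DESC_END), a register offset relative to JME_PAR0, and the data byte copied into eaddr[] above.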
*/ 438 offset += JME_EEPROM_DESC_BYTES; 439 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END); 440 441 if (match == ETHER_ADDR_LEN) 442 return (0); 443 444 return (ENOENT); 445 } 446 447 void 448 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[]) 449 { 450 uint32_t par0, par1; 451 452 /* Read station address. */ 453 par0 = CSR_READ_4(sc, JME_PAR0); 454 par1 = CSR_READ_4(sc, JME_PAR1); 455 par1 &= 0xFFFF; 456 457 eaddr[0] = (par0 >> 0) & 0xFF; 458 eaddr[1] = (par0 >> 8) & 0xFF; 459 eaddr[2] = (par0 >> 16) & 0xFF; 460 eaddr[3] = (par0 >> 24) & 0xFF; 461 eaddr[4] = (par1 >> 0) & 0xFF; 462 eaddr[5] = (par1 >> 8) & 0xFF; 463 } 464 465 void 466 jme_map_intr_vector(struct jme_softc *sc) 467 { 468 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES]; 469 470 bzero(map, sizeof(map)); 471 472 /* Map Tx interrupt sources to MSI/MSIX vector 2. */ 473 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] = 474 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP); 475 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |= 476 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP); 477 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |= 478 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP); 479 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |= 480 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP); 481 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |= 482 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP); 483 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |= 484 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP); 485 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |= 486 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP); 487 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |= 488 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP); 489 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |= 490 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL); 491 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |= 492 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO); 493 494 /* Map Rx interrupt sources to MSI/MSIX vector 1. */ 495 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] |= 496 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP); 497 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] |= 498 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP); 499 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] |= 500 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP); 501 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] |= 502 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP); 503 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] |= 504 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY); 505 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] |= 506 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY); 507 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] |= 508 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY); 509 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] |= 510 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY); 511 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] |= 512 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL); 513 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] |= 514 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL); 515 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] |= 516 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL); 517 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] |= 518 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL); 519 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] |= 520 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO); 521 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] |= 522 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO); 523 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] |= 524 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO); 525 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] |= 526 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO); 527 528 /* Map all other interrupt sources to MSI/MSIX vector 0.
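 * Sources not assigned above were cleared by the bzero() and therefore already encode vector 0; the four register writes below load the whole table into the hardware.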
*/ 529 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]); 530 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]); 531 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]); 532 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]); 533 } 534 535 void 536 jme_attach(struct device *parent, struct device *self, void *aux) 537 { 538 struct jme_softc *sc = (struct jme_softc *)self; 539 struct pci_attach_args *pa = aux; 540 pci_chipset_tag_t pc = pa->pa_pc; 541 pci_intr_handle_t ih; 542 const char *intrstr; 543 pcireg_t memtype; 544 545 struct ifnet *ifp; 546 uint32_t reg; 547 int error = 0; 548 549 /* 550 * Allocate IO memory 551 * 552 * The JMC250 supports both memory mapped and I/O register space 553 * access. Because I/O register access would have to use different 554 * BARs to reach all registers, it's a waste of time to use I/O 555 * register space access. The JMC250 uses 16K to map the entire memory 556 * space. 557 */ 558 559 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR); 560 if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt, 561 &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) { 562 printf(": can't map mem space\n"); 563 return; 564 } 565 566 if (pci_intr_map_msi(pa, &ih) == 0) 567 jme_map_intr_vector(sc); 568 else if (pci_intr_map(pa, &ih) != 0) { 569 printf(": can't map interrupt\n"); 570 return; 571 } 572 573 /* 574 * Allocate IRQ 575 */ 576 intrstr = pci_intr_string(pc, ih); 577 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc, 578 sc->sc_dev.dv_xname); 579 if (sc->sc_irq_handle == NULL) { 580 printf(": could not establish interrupt"); 581 if (intrstr != NULL) 582 printf(" at %s", intrstr); 583 printf("\n"); 584 return; 585 } 586 printf(": %s", intrstr); 587 588 sc->sc_dmat = pa->pa_dmat; 589 sc->jme_pct = pa->pa_pc; 590 sc->jme_pcitag = pa->pa_tag; 591 592 /* 593 * Extract FPGA revision 594 */ 595 reg = CSR_READ_4(sc, JME_CHIPMODE); 596 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != 597 CHIPMODE_NOT_FPGA) { 598 sc->jme_caps |= JME_CAP_FPGA; 599 600 if (jmedebug) { 601 printf("%s: FPGA revision : 0x%04x\n", 602 sc->sc_dev.dv_xname, 603 (reg & CHIPMODE_FPGA_REV_MASK) >> 604 CHIPMODE_FPGA_REV_SHIFT); 605 } 606 } 607 608 sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT; 609 610 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 && 611 PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2) 612 sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS; 613 614 /* Reset the ethernet controller. */ 615 jme_reset(sc); 616 617 /* Get station address. */ 618 reg = CSR_READ_4(sc, JME_SMBCSR); 619 if (reg & SMBCSR_EEPROM_PRESENT) 620 error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr); 621 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) { 622 if (error != 0 && (jmedebug)) { 623 printf("%s: ethernet hardware address " 624 "not found in EEPROM.\n", sc->sc_dev.dv_xname); 625 } 626 jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr); 627 } 628 629 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 630 631 /* 632 * Save PHY address. 633 * The integrated JR0211 has a fixed PHY address whereas the FPGA 634 * version requires PHY probing to get the correct PHY address. 635 */ 636 if ((sc->jme_caps & JME_CAP_FPGA) == 0) { 637 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) & 638 GPREG0_PHY_ADDR_MASK; 639 if (jmedebug) { 640 printf("%s: PHY is at address %d.\n", 641 sc->sc_dev.dv_xname, sc->jme_phyaddr); 642 } 643 } else { 644 sc->jme_phyaddr = 0; 645 } 646 647 /* Set max allowable DMA size.
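 * These are conservative defaults: 512 bytes for Tx and 128 bytes for Rx, OR'ed into TXCSR/RXCSR later in jme_init().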
*/ 648 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 649 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 650 651 #ifdef notyet 652 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 653 sc->jme_caps |= JME_CAP_PMCAP; 654 #endif 655 656 /* Allocate DMA stuffs */ 657 error = jme_dma_alloc(sc); 658 if (error) 659 goto fail; 660 661 ifp = &sc->sc_arpcom.ac_if; 662 ifp->if_softc = sc; 663 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 664 ifp->if_ioctl = jme_ioctl; 665 ifp->if_start = jme_start; 666 ifp->if_watchdog = jme_watchdog; 667 IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1); 668 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 669 670 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 | 671 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | 672 IFCAP_CSUM_UDPv6; 673 674 #if NVLAN > 0 675 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 676 #endif 677 678 /* Set up MII bus. */ 679 sc->sc_miibus.mii_ifp = ifp; 680 sc->sc_miibus.mii_readreg = jme_miibus_readreg; 681 sc->sc_miibus.mii_writereg = jme_miibus_writereg; 682 sc->sc_miibus.mii_statchg = jme_miibus_statchg; 683 684 ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange, 685 jme_mediastatus); 686 mii_attach(self, &sc->sc_miibus, 0xffffffff, 687 sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr, 688 MII_OFFSET_ANY, MIIF_DOPAUSE); 689 690 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 691 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 692 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 693 0, NULL); 694 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 695 } else 696 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 697 698 /* 699 * Save PHYADDR for FPGA mode PHY not handled, not production hw 700 */ 701 702 if_attach(ifp); 703 ether_ifattach(ifp); 704 705 timeout_set(&sc->jme_tick_ch, jme_tick, sc); 706 707 return; 708 fail: 709 jme_detach(&sc->sc_dev, 0); 710 } 711 712 int 713 jme_detach(struct device *self, int flags) 714 { 715 struct jme_softc *sc = (struct jme_softc *)self; 716 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 717 int s; 718 719 s = splnet(); 720 jme_stop(sc); 721 splx(s); 722 723 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 724 725 /* Delete all remaining media. */ 726 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 727 728 ether_ifdetach(ifp); 729 if_detach(ifp); 730 jme_dma_free(sc); 731 732 if (sc->sc_irq_handle != NULL) { 733 pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle); 734 sc->sc_irq_handle = NULL; 735 } 736 737 return (0); 738 } 739 740 int 741 jme_dma_alloc(struct jme_softc *sc) 742 { 743 struct jme_txdesc *txd; 744 struct jme_rxdesc *rxd; 745 int error, i, nsegs; 746 747 /* 748 * Create DMA stuffs for TX ring 749 */ 750 751 error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1, 752 JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT, 753 &sc->jme_cdata.jme_tx_ring_map); 754 if (error) 755 return (ENOBUFS); 756 757 /* Allocate DMA'able memory for TX ring */ 758 error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0, 759 &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs, 760 BUS_DMA_WAITOK); 761 /* XXX zero */ 762 if (error) { 763 printf("%s: could not allocate DMA'able memory for Tx ring.\n", 764 sc->sc_dev.dv_xname); 765 return error; 766 } 767 768 error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg, 769 nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring, 770 BUS_DMA_NOWAIT); 771 if (error) 772 return (ENOBUFS); 773 774 /* Load the DMA map for Tx ring. 
*/ 775 error = bus_dmamap_load(sc->sc_dmat, 776 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring, 777 JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT); 778 if (error) { 779 printf("%s: could not load DMA'able memory for Tx ring.\n", 780 sc->sc_dev.dv_xname); 781 bus_dmamem_free(sc->sc_dmat, 782 (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1); 783 return error; 784 } 785 sc->jme_rdata.jme_tx_ring_paddr = 786 sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr; 787 788 /* 789 * Create DMA stuffs for RX ring 790 */ 791 792 error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1, 793 JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT, 794 &sc->jme_cdata.jme_rx_ring_map); 795 if (error) 796 return (ENOBUFS); 797 798 /* Allocate DMA'able memory for RX ring */ 799 error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0, 800 &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs, 801 BUS_DMA_WAITOK | BUS_DMA_ZERO); 802 /* XXX zero */ 803 if (error) { 804 printf("%s: could not allocate DMA'able memory for Rx ring.\n", 805 sc->sc_dev.dv_xname); 806 return error; 807 } 808 809 error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg, 810 nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring, 811 BUS_DMA_NOWAIT); 812 if (error) 813 return (ENOBUFS); 814 815 /* Load the DMA map for Rx ring. */ 816 error = bus_dmamap_load(sc->sc_dmat, 817 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring, 818 JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT); 819 if (error) { 820 printf("%s: could not load DMA'able memory for Rx ring.\n", 821 sc->sc_dev.dv_xname); 822 bus_dmamem_free(sc->sc_dmat, 823 (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1); 824 return error; 825 } 826 sc->jme_rdata.jme_rx_ring_paddr = 827 sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr; 828 829 #if 0 830 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 831 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE; 832 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE; 833 if ((JME_ADDR_HI(tx_ring_end) != 834 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) || 835 (JME_ADDR_HI(rx_ring_end) != 836 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) { 837 printf("%s: 4GB boundary crossed, switching to 32bit " 838 "DMA address mode.\n", sc->sc_dev.dv_xname); 839 jme_dma_free(sc); 840 /* Limit DMA address space to 32bit and try again. */ 841 lowaddr = BUS_SPACE_MAXADDR_32BIT; 842 goto again; 843 } 844 #endif 845 846 /* 847 * Create DMA stuffs for shadow status block 848 */ 849 850 error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1, 851 JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map); 852 if (error) 853 return (ENOBUFS); 854 855 /* Allocate DMA'able memory for shared status block. 
*/ 856 error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0, 857 &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK); 858 if (error) { 859 printf("%s: could not allocate DMA'able " 860 "memory for shared status block.\n", sc->sc_dev.dv_xname); 861 return error; 862 } 863 864 error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg, 865 nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block, 866 BUS_DMA_NOWAIT); 867 if (error) 868 return (ENOBUFS); 869 870 /* Load the DMA map for shared status block */ 871 error = bus_dmamap_load(sc->sc_dmat, 872 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block, 873 JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT); 874 if (error) { 875 printf("%s: could not load DMA'able memory " 876 "for shared status block.\n", sc->sc_dev.dv_xname); 877 bus_dmamem_free(sc->sc_dmat, 878 (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1); 879 return error; 880 } 881 sc->jme_rdata.jme_ssb_block_paddr = 882 sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr; 883 884 /* 885 * Create DMA stuffs for TX buffers 886 */ 887 888 /* Create DMA maps for Tx buffers. */ 889 for (i = 0; i < JME_TX_RING_CNT; i++) { 890 txd = &sc->jme_cdata.jme_txdesc[i]; 891 error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE, 892 JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 893 &txd->tx_dmamap); 894 if (error) { 895 int j; 896 897 printf("%s: could not create %dth Tx dmamap.\n", 898 sc->sc_dev.dv_xname, i); 899 900 for (j = 0; j < i; ++j) { 901 txd = &sc->jme_cdata.jme_txdesc[j]; 902 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 903 } 904 return error; 905 } 906 907 } 908 909 /* 910 * Create DMA stuffs for RX buffers 911 */ 912 913 /* Create DMA maps for Rx buffers. */ 914 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 915 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap); 916 if (error) { 917 printf("%s: could not create spare Rx dmamap.\n", 918 sc->sc_dev.dv_xname); 919 return error; 920 } 921 for (i = 0; i < JME_RX_RING_CNT; i++) { 922 rxd = &sc->jme_cdata.jme_rxdesc[i]; 923 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 924 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 925 if (error) { 926 int j; 927 928 printf("%s: could not create %dth Rx dmamap.\n", 929 sc->sc_dev.dv_xname, i); 930 931 for (j = 0; j < i; ++j) { 932 rxd = &sc->jme_cdata.jme_rxdesc[j]; 933 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 934 } 935 bus_dmamap_destroy(sc->sc_dmat, 936 sc->jme_cdata.jme_rx_sparemap); 937 sc->jme_cdata.jme_rx_tag = NULL; 938 return error; 939 } 940 } 941 942 return 0; 943 } 944 945 void 946 jme_dma_free(struct jme_softc *sc) 947 { 948 struct jme_txdesc *txd; 949 struct jme_rxdesc *rxd; 950 int i; 951 952 /* Tx ring */ 953 bus_dmamap_unload(sc->sc_dmat, 954 sc->jme_cdata.jme_tx_ring_map); 955 bus_dmamem_free(sc->sc_dmat, 956 (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1); 957 958 /* Rx ring */ 959 bus_dmamap_unload(sc->sc_dmat, 960 sc->jme_cdata.jme_rx_ring_map); 961 bus_dmamem_free(sc->sc_dmat, 962 (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1); 963 964 /* Tx buffers */ 965 for (i = 0; i < JME_TX_RING_CNT; i++) { 966 txd = &sc->jme_cdata.jme_txdesc[i]; 967 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 968 } 969 970 /* Rx buffers */ 971 for (i = 0; i < JME_RX_RING_CNT; i++) { 972 rxd = &sc->jme_cdata.jme_rxdesc[i]; 973 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 974 } 975 bus_dmamap_destroy(sc->sc_dmat, 976 sc->jme_cdata.jme_rx_sparemap); 977 978 /* Shadow status block. 
*/ 979 bus_dmamap_unload(sc->sc_dmat, 980 sc->jme_cdata.jme_ssb_map); 981 bus_dmamem_free(sc->sc_dmat, 982 (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1); 983 } 984 985 #ifdef notyet 986 /* 987 * Unlike other ethernet controllers, the JMC250 requires 988 * explicitly resetting the link speed to 10/100Mbps, as a gigabit 989 * link will consume more power than 375mA. 990 * Note, we reset the link speed to 10/100Mbps with 991 * auto-negotiation, but we don't know whether that operation 992 * will succeed or not as we have no control after powering 993 * off. If the renegotiation fails, WOL may not work. Running 994 * at 1Gbps draws more power than the 375mA at 3.3V which is 995 * specified in the PCI specification, and that would result in 996 * power to the ethernet controller being shut down completely. 997 * 998 * TODO 999 * Save current negotiated media speed/duplex/flow-control 1000 * to softc and restore the same link again after resuming. 1001 * PHY handling such as power down/resetting to 100Mbps 1002 * may be better handled in suspend method in phy driver. 1003 */ 1004 void 1005 jme_setlinkspeed(struct jme_softc *sc) 1006 { 1007 struct mii_data *mii; 1008 int aneg, i; 1009 1010 JME_LOCK_ASSERT(sc); 1011 1012 mii = &sc->sc_miibus; 1013 mii_pollstat(mii); 1014 aneg = 0; 1015 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1016 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1017 case IFM_10_T: 1018 case IFM_100_TX: 1019 return; 1020 case IFM_1000_T: 1021 aneg++; /* FALLTHROUGH */ 1022 default: 1023 break; 1024 } 1025 } 1026 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0); 1027 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR, 1028 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1029 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR, 1030 BMCR_AUTOEN | BMCR_STARTNEG); 1031 DELAY(1000); 1032 if (aneg != 0) { 1033 /* Poll link state until jme(4) gets a 10/100 link. */ 1034 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1035 mii_pollstat(mii); 1036 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1037 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1038 case IFM_10_T: 1039 case IFM_100_TX: 1040 jme_mac_config(sc); 1041 return; 1042 default: 1043 break; 1044 } 1045 } 1046 JME_UNLOCK(sc); 1047 pause("jmelnk", hz); 1048 JME_LOCK(sc); 1049 } 1050 if (i == MII_ANEGTICKS_GIGE) 1051 printf("%s: establishing link failed, " 1052 "WOL may not work!\n", sc->sc_dev.dv_xname); 1053 } 1054 /* 1055 * No link, force MAC to have 100Mbps, full-duplex link. 1056 * This is the last resort and may or may not work. 1057 */ 1058 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1059 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1060 jme_mac_config(sc); 1061 } 1062 1063 void 1064 jme_setwol(struct jme_softc *sc) 1065 { 1066 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1067 uint32_t gpr, pmcs; 1068 uint16_t pmstat; 1069 int pmc; 1070 1071 if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) { 1072 /* No PME capability, PHY power down. */ 1073 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, 1074 MII_BMCR, BMCR_PDOWN); 1075 return; 1076 } 1077 1078 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1079 pmcs = CSR_READ_4(sc, JME_PMCS); 1080 pmcs &= ~PMCS_WOL_ENB_MASK; 1081 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1082 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1083 /* Enable PME message. */ 1084 gpr |= GPREG0_PME_ENB; 1085 /* For gigabit controllers, reset link speed to 10/100.
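 * As explained above jme_setlinkspeed(), a gigabit link may draw more than the 375mA at 3.3V the PCI specification allows, so drop to 10/100 before arming magic-frame wakeup.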
*/ 1086 if ((sc->jme_caps & JME_CAP_FASTETH) == 0) 1087 jme_setlinkspeed(sc); 1088 } 1089 1090 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1091 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1092 1093 /* Request PME. */ 1094 pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2); 1095 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1096 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1097 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1098 pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1099 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1100 /* No WOL, PHY power down. */ 1101 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, 1102 MII_BMCR, BMCR_PDOWN); 1103 } 1104 } 1105 #endif 1106 1107 int 1108 jme_encap(struct jme_softc *sc, struct mbuf *m) 1109 { 1110 struct jme_txdesc *txd; 1111 struct jme_desc *desc; 1112 int error, i, prod; 1113 uint32_t cflags; 1114 1115 prod = sc->jme_cdata.jme_tx_prod; 1116 txd = &sc->jme_cdata.jme_txdesc[prod]; 1117 1118 error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, 1119 m, BUS_DMA_NOWAIT); 1120 if (error != 0 && error != EFBIG) 1121 goto drop; 1122 if (error != 0) { 1123 if (m_defrag(m, M_DONTWAIT)) { 1124 error = ENOBUFS; 1125 goto drop; 1126 } 1127 error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, 1128 m, BUS_DMA_NOWAIT); 1129 if (error != 0) 1130 goto drop; 1131 } 1132 1133 cflags = 0; 1134 1135 /* Configure checksum offload. */ 1136 if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1137 cflags |= JME_TD_IPCSUM; 1138 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1139 cflags |= JME_TD_TCPCSUM; 1140 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1141 cflags |= JME_TD_UDPCSUM; 1142 1143 #if NVLAN > 0 1144 /* Configure VLAN. */ 1145 if (m->m_flags & M_VLANTAG) { 1146 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK); 1147 cflags |= JME_TD_VLAN_TAG; 1148 } 1149 #endif 1150 1151 desc = &sc->jme_rdata.jme_tx_ring[prod]; 1152 desc->flags = htole32(cflags); 1153 desc->buflen = 0; 1154 desc->addr_hi = htole32(m->m_pkthdr.len); 1155 desc->addr_lo = 0; 1156 sc->jme_cdata.jme_tx_cnt++; 1157 JME_DESC_INC(prod, JME_TX_RING_CNT); 1158 for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) { 1159 desc = &sc->jme_rdata.jme_tx_ring[prod]; 1160 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT); 1161 desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len); 1162 desc->addr_hi = 1163 htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr)); 1164 desc->addr_lo = 1165 htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr)); 1166 sc->jme_cdata.jme_tx_cnt++; 1167 JME_DESC_INC(prod, JME_TX_RING_CNT); 1168 } 1169 1170 /* Update producer index. */ 1171 sc->jme_cdata.jme_tx_prod = prod; 1172 /* 1173 * Finally, request an interrupt and give ownership of the first 1174 * descriptor to the hardware. 1175 */ 1176 desc = txd->tx_desc; 1177 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); 1178 1179 txd->tx_m = m; 1180 txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD; 1181 1182 /* Sync descriptors. */ 1183 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 1184 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1185 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0, 1186 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1187 1188 return (0); 1189 1190 drop: 1191 m_freem(m); 1192 return (error); 1193 } 1194 1195 void 1196 jme_start(struct ifnet *ifp) 1197 { 1198 struct jme_softc *sc = ifp->if_softc; 1199 struct mbuf *m; 1200 int enq = 0; 1201 1202 /* Reclaim transmitted frames.
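 * This is done eagerly whenever the number of active Tx descriptors crosses the high-water mark, so the ring seldom fills up while waiting for a Tx completion interrupt.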
*/ 1203 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT) 1204 jme_txeof(sc); 1205 1206 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 1207 return; 1208 if ((sc->jme_flags & JME_FLAG_LINK) == 0) 1209 return; 1210 if (IFQ_IS_EMPTY(&ifp->if_snd)) 1211 return; 1212 1213 for (;;) { 1214 /* 1215 * Check number of available TX descs, always 1216 * leave JME_TXD_RSVD free TX descs. 1217 */ 1218 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD > 1219 JME_TX_RING_CNT - JME_TXD_RSVD) { 1220 ifq_set_oactive(&ifp->if_snd); 1221 break; 1222 } 1223 1224 IFQ_DEQUEUE(&ifp->if_snd, m); 1225 if (m == NULL) 1226 break; 1227 1228 /* 1229 * Pack the data into the transmit ring. If we 1230 * don't have room, set the OACTIVE flag and wait 1231 * for the NIC to drain the ring. 1232 */ 1233 if (jme_encap(sc, m) != 0) { 1234 ifp->if_oerrors++; 1235 continue; 1236 } 1237 1238 enq++; 1239 1240 #if NBPFILTER > 0 1241 /* 1242 * If there's a BPF listener, bounce a copy of this frame 1243 * to him. 1244 */ 1245 if (ifp->if_bpf != NULL) 1246 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1247 #endif 1248 } 1249 1250 if (enq > 0) { 1251 /* 1252 * Reading TXCSR takes a very long time under heavy load, 1253 * so cache the TXCSR value and write the ORed value with 1254 * the kick command to TXCSR. This saves one register 1255 * access cycle. 1256 */ 1257 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | 1258 TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1259 /* Set a timeout in case the chip goes out to lunch. */ 1260 ifp->if_timer = JME_TX_TIMEOUT; 1261 } 1262 } 1263 1264 void 1265 jme_watchdog(struct ifnet *ifp) 1266 { 1267 struct jme_softc *sc = ifp->if_softc; 1268 1269 if ((sc->jme_flags & JME_FLAG_LINK) == 0) { 1270 printf("%s: watchdog timeout (missed link)\n", 1271 sc->sc_dev.dv_xname); 1272 ifp->if_oerrors++; 1273 jme_init(ifp); 1274 return; 1275 } 1276 1277 jme_txeof(sc); 1278 if (sc->jme_cdata.jme_tx_cnt == 0) { 1279 printf("%s: watchdog timeout (missed Tx interrupts) " 1280 "-- recovering\n", sc->sc_dev.dv_xname); 1281 jme_start(ifp); 1282 return; 1283 } 1284 1285 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1286 ifp->if_oerrors++; 1287 jme_init(ifp); 1288 jme_start(ifp); 1289 } 1290 1291 int 1292 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1293 { 1294 struct jme_softc *sc = ifp->if_softc; 1295 struct mii_data *mii = &sc->sc_miibus; 1296 struct ifreq *ifr = (struct ifreq *)data; 1297 int error = 0, s; 1298 1299 s = splnet(); 1300 1301 switch (cmd) { 1302 case SIOCSIFADDR: 1303 ifp->if_flags |= IFF_UP; 1304 if (!(ifp->if_flags & IFF_RUNNING)) 1305 jme_init(ifp); 1306 break; 1307 1308 case SIOCSIFFLAGS: 1309 if (ifp->if_flags & IFF_UP) { 1310 if (ifp->if_flags & IFF_RUNNING) 1311 error = ENETRESET; 1312 else 1313 jme_init(ifp); 1314 } else { 1315 if (ifp->if_flags & IFF_RUNNING) 1316 jme_stop(sc); 1317 } 1318 break; 1319 1320 case SIOCSIFMEDIA: 1321 case SIOCGIFMEDIA: 1322 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1323 break; 1324 1325 default: 1326 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1327 } 1328 1329 if (error == ENETRESET) { 1330 if (ifp->if_flags & IFF_RUNNING) 1331 jme_iff(sc); 1332 error = 0; 1333 } 1334 1335 splx(s); 1336 return (error); 1337 } 1338 1339 void 1340 jme_mac_config(struct jme_softc *sc) 1341 { 1342 struct mii_data *mii; 1343 uint32_t ghc, rxmac, txmac, txpause, gp1; 1344 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0; 1345 1346 mii = &sc->sc_miibus; 1347 1348 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 1349 DELAY(10); 1350 CSR_WRITE_4(sc, 
JME_GHC, 0); 1351 ghc = 0; 1352 rxmac = CSR_READ_4(sc, JME_RXMAC); 1353 rxmac &= ~RXMAC_FC_ENB; 1354 txmac = CSR_READ_4(sc, JME_TXMAC); 1355 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); 1356 txpause = CSR_READ_4(sc, JME_TXPFC); 1357 txpause &= ~TXPFC_PAUSE_ENB; 1358 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1359 ghc |= GHC_FULL_DUPLEX; 1360 rxmac &= ~RXMAC_COLL_DET_ENB; 1361 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | 1362 TXMAC_BACKOFF | TXMAC_CARRIER_EXT | 1363 TXMAC_FRAME_BURST); 1364 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1365 txpause |= TXPFC_PAUSE_ENB; 1366 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1367 rxmac |= RXMAC_FC_ENB; 1368 /* Disable retry transmit timer/retry limit. */ 1369 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) & 1370 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); 1371 } else { 1372 rxmac |= RXMAC_COLL_DET_ENB; 1373 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; 1374 /* Enable retry transmit timer/retry limit. */ 1375 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) | 1376 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); 1377 } 1378 1379 /* 1380 * Reprogram Tx/Rx MACs with resolved speed/duplex. 1381 */ 1382 gp1 = CSR_READ_4(sc, JME_GPREG1); 1383 gp1 &= ~GPREG1_HALF_PATCH; 1384 1385 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) 1386 hdx = 1; 1387 1388 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1389 case IFM_10_T: 1390 ghc |= GHC_SPEED_10; 1391 if (hdx) 1392 gp1 |= GPREG1_HALF_PATCH; 1393 break; 1394 1395 case IFM_100_TX: 1396 ghc |= GHC_SPEED_100; 1397 if (hdx) 1398 gp1 |= GPREG1_HALF_PATCH; 1399 1400 /* 1401 * Use extended FIFO depth to work around CRC errors 1402 * emitted by chips prior to the JMC250B 1403 */ 1404 phyconf = JMPHY_CONF_EXTFIFO; 1405 break; 1406 1407 case IFM_1000_T: 1408 if (sc->jme_caps & JME_CAP_FASTETH) 1409 break; 1410 1411 ghc |= GHC_SPEED_1000; 1412 if (hdx) 1413 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; 1414 break; 1415 1416 default: 1417 break; 1418 } 1419 1420 if (sc->jme_revfm >= 2) { 1421 /* set clock sources for tx mac and offload engine */ 1422 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 1423 ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000; 1424 else 1425 ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100; 1426 } 1427 1428 CSR_WRITE_4(sc, JME_GHC, ghc); 1429 CSR_WRITE_4(sc, JME_RXMAC, rxmac); 1430 CSR_WRITE_4(sc, JME_TXMAC, txmac); 1431 CSR_WRITE_4(sc, JME_TXPFC, txpause); 1432 1433 if (sc->jme_workaround & JME_WA_CRCERRORS) { 1434 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, 1435 JMPHY_CONF, phyconf); 1436 } 1437 if (sc->jme_workaround & JME_WA_PACKETLOSS) 1438 CSR_WRITE_4(sc, JME_GPREG1, gp1); 1439 } 1440 1441 int 1442 jme_intr(void *xsc) 1443 { 1444 struct jme_softc *sc = xsc; 1445 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1446 uint32_t status; 1447 int claimed = 0; 1448 1449 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS); 1450 if (status == 0 || status == 0xFFFFFFFF) 1451 return (0); 1452 1453 /* Disable interrupts. */ 1454 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 1455 1456 status = CSR_READ_4(sc, JME_INTR_STATUS); 1457 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF) 1458 goto back; 1459 1460 /* Reset PCC counter/timer and Ack interrupts.
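 * Per the note above, acking a coalescing interrupt has to include the matching COMP bit: the masking below drops the COMP bits first and folds them back in only where a COAL or COAL_TO bit is pending.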
*/ 1461 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); 1462 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) 1463 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; 1464 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 1465 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP; 1466 CSR_WRITE_4(sc, JME_INTR_STATUS, status); 1467 1468 if (ifp->if_flags & IFF_RUNNING) { 1469 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) 1470 jme_rxeof(sc); 1471 1472 if (status & INTR_RXQ_DESC_EMPTY) { 1473 /* 1474 * Notify the hardware of the availability of new Rx 1475 * buffers. Reading RXCSR takes a very long time under 1476 * heavy load, so cache the RXCSR value and write the 1477 * ORed value with the kick command to RXCSR. This 1478 * saves one register access cycle. 1479 */ 1480 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | 1481 RXCSR_RX_ENB | RXCSR_RXQ_START); 1482 } 1483 1484 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) { 1485 jme_txeof(sc); 1486 jme_start(ifp); 1487 } 1488 } 1489 claimed = 1; 1490 back: 1491 /* Reenable interrupts. */ 1492 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 1493 1494 return (claimed); 1495 } 1496 1497 void 1498 jme_txeof(struct jme_softc *sc) 1499 { 1500 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1501 struct jme_txdesc *txd; 1502 uint32_t status; 1503 int cons, nsegs; 1504 1505 cons = sc->jme_cdata.jme_tx_cons; 1506 if (cons == sc->jme_cdata.jme_tx_prod) 1507 return; 1508 1509 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0, 1510 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1511 1512 /* 1513 * Go through our Tx list and free mbufs for those 1514 * frames which have been transmitted. 1515 */ 1516 while (cons != sc->jme_cdata.jme_tx_prod) { 1517 txd = &sc->jme_cdata.jme_txdesc[cons]; 1518 1519 if (txd->tx_m == NULL) 1520 panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname); 1521 1522 status = letoh32(txd->tx_desc->flags); 1523 if ((status & JME_TD_OWN) == JME_TD_OWN) 1524 break; 1525 1526 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) { 1527 ifp->if_oerrors++; 1528 } else { 1529 ifp->if_opackets++; 1530 if (status & JME_TD_COLLISION) { 1531 ifp->if_collisions += 1532 letoh32(txd->tx_desc->buflen) & 1533 JME_TD_BUF_LEN_MASK; 1534 } 1535 } 1536 1537 /* 1538 * Only the first descriptor of a multi-descriptor 1539 * transmission is updated, so the driver has to skip the 1540 * entire chain of buffers for the transmitted frame. In other 1541 * words, the JME_TD_OWN bit is valid only in the first 1542 * descriptor of a multi-descriptor transmission. 1543 */ 1544 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1545 sc->jme_rdata.jme_tx_ring[cons].flags = 0; 1546 JME_DESC_INC(cons, JME_TX_RING_CNT); 1547 } 1548 1549 /* Reclaim transferred mbufs.
*/ 1550 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1551 m_freem(txd->tx_m); 1552 txd->tx_m = NULL; 1553 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc; 1554 if (sc->jme_cdata.jme_tx_cnt < 0) 1555 panic("%s: Active Tx desc counter was garbled", 1556 sc->sc_dev.dv_xname); 1557 txd->tx_ndesc = 0; 1558 } 1559 sc->jme_cdata.jme_tx_cons = cons; 1560 1561 if (sc->jme_cdata.jme_tx_cnt == 0) 1562 ifp->if_timer = 0; 1563 1564 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <= 1565 JME_TX_RING_CNT - JME_TXD_RSVD) 1566 ifq_clr_oactive(&ifp->if_snd); 1567 1568 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0, 1569 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1570 } 1571 1572 void 1573 jme_discard_rxbufs(struct jme_softc *sc, int cons, int count) 1574 { 1575 int i; 1576 1577 for (i = 0; i < count; ++i) { 1578 struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons]; 1579 1580 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 1581 desc->buflen = htole32(MCLBYTES); 1582 JME_DESC_INC(cons, JME_RX_RING_CNT); 1583 } 1584 } 1585 1586 /* Receive a frame. */ 1587 void 1588 jme_rxpkt(struct jme_softc *sc) 1589 { 1590 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1591 struct jme_desc *desc; 1592 struct jme_rxdesc *rxd; 1593 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 1594 struct mbuf *mp, *m; 1595 uint32_t flags, status; 1596 int cons, count, nsegs; 1597 1598 cons = sc->jme_cdata.jme_rx_cons; 1599 desc = &sc->jme_rdata.jme_rx_ring[cons]; 1600 flags = letoh32(desc->flags); 1601 status = letoh32(desc->buflen); 1602 nsegs = JME_RX_NSEGS(status); 1603 1604 if (status & JME_RX_ERR_STAT) { 1605 ifp->if_ierrors++; 1606 jme_discard_rxbufs(sc, cons, nsegs); 1607 #ifdef JME_SHOW_ERRORS 1608 printf("%s : receive error = 0x%b\n", 1609 sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS); 1610 #endif 1611 sc->jme_cdata.jme_rx_cons += nsegs; 1612 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT; 1613 return; 1614 } 1615 1616 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 1617 for (count = 0; count < nsegs; count++, 1618 JME_DESC_INC(cons, JME_RX_RING_CNT)) { 1619 rxd = &sc->jme_cdata.jme_rxdesc[cons]; 1620 mp = rxd->rx_m; 1621 1622 /* Add a new receive buffer to the ring. */ 1623 if (jme_newbuf(sc, rxd) != 0) { 1624 ifp->if_iqdrops++; 1625 /* Reuse buffer. */ 1626 jme_discard_rxbufs(sc, cons, nsegs - count); 1627 if (sc->jme_cdata.jme_rxhead != NULL) { 1628 m_freem(sc->jme_cdata.jme_rxhead); 1629 JME_RXCHAIN_RESET(sc); 1630 } 1631 break; 1632 } 1633 1634 /* 1635 * Assume we've received a full sized frame. 1636 * Actual size is fixed when we encounter the end of 1637 * multi-segmented frame. 1638 */ 1639 mp->m_len = MCLBYTES; 1640 1641 /* Chain received mbufs. */ 1642 if (sc->jme_cdata.jme_rxhead == NULL) { 1643 sc->jme_cdata.jme_rxhead = mp; 1644 sc->jme_cdata.jme_rxtail = mp; 1645 } else { 1646 /* 1647 * Receive processor can receive a maximum frame 1648 * size of 65535 bytes. 1649 */ 1650 mp->m_flags &= ~M_PKTHDR; 1651 sc->jme_cdata.jme_rxtail->m_next = mp; 1652 sc->jme_cdata.jme_rxtail = mp; 1653 } 1654 1655 if (count == nsegs - 1) { 1656 /* Last desc. for this frame. */ 1657 m = sc->jme_cdata.jme_rxhead; 1658 /* XXX assert PKTHDR? */ 1659 m->m_flags |= M_PKTHDR; 1660 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen; 1661 if (nsegs > 1) { 1662 /* Set first mbuf size. */ 1663 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 1664 /* Set last mbuf size. 
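 * Illustrative arithmetic (assumed values): with MCLBYTES = 2048 and JME_RX_PAD_BYTES = 10, a 5000 byte frame received in nsegs = 3 clusters yields a 2038 byte first mbuf, a 2048 byte middle mbuf and a last mbuf of 5000 - (2038 + 2048) = 914 bytes.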
*/ 1665 mp->m_len = sc->jme_cdata.jme_rxlen - 1666 ((MCLBYTES - JME_RX_PAD_BYTES) + 1667 (MCLBYTES * (nsegs - 2))); 1668 } else { 1669 m->m_len = sc->jme_cdata.jme_rxlen; 1670 } 1671 1672 /* 1673 * Account for the 10 bytes of auto padding which are used 1674 * to align the IP header on a 32-bit boundary. Also note 1675 * that the CRC bytes are automatically removed by the 1676 * hardware. 1677 */ 1678 m->m_data += JME_RX_PAD_BYTES; 1679 1680 /* Set checksum information. */ 1681 if (flags & (JME_RD_IPV4|JME_RD_IPV6)) { 1682 if ((flags & JME_RD_IPV4) && 1683 (flags & JME_RD_IPCSUM)) 1684 m->m_pkthdr.csum_flags |= 1685 M_IPV4_CSUM_IN_OK; 1686 if ((flags & JME_RD_MORE_FRAG) == 0 && 1687 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 1688 (JME_RD_TCP | JME_RD_TCPCSUM) || 1689 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 1690 (JME_RD_UDP | JME_RD_UDPCSUM))) { 1691 m->m_pkthdr.csum_flags |= 1692 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 1693 } 1694 } 1695 1696 #if NVLAN > 0 1697 /* Check for VLAN tagged packets. */ 1698 if (flags & JME_RD_VLAN_TAG) { 1699 m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK; 1700 m->m_flags |= M_VLANTAG; 1701 } 1702 #endif 1703 1704 ml_enqueue(&ml, m); 1705 1706 /* Reset mbuf chains. */ 1707 JME_RXCHAIN_RESET(sc); 1708 } 1709 } 1710 1711 if_input(ifp, &ml); 1712 1713 sc->jme_cdata.jme_rx_cons += nsegs; 1714 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT; 1715 } 1716 1717 void 1718 jme_rxeof(struct jme_softc *sc) 1719 { 1720 struct jme_desc *desc; 1721 int nsegs, prog, pktlen; 1722 1723 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0, 1724 sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1725 1726 prog = 0; 1727 for (;;) { 1728 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons]; 1729 if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 1730 break; 1731 if ((letoh32(desc->buflen) & JME_RD_VALID) == 0) 1732 break; 1733 1734 /* 1735 * Check the number of segments against the received bytes. 1736 * A non-matching value would indicate that the hardware 1737 * is still trying to update the Rx descriptors. I'm not 1738 * sure whether this check is needed. 1739 */ 1740 nsegs = JME_RX_NSEGS(letoh32(desc->buflen)); 1741 pktlen = JME_RX_BYTES(letoh32(desc->buflen)); 1742 if (nsegs != howmany(pktlen, MCLBYTES)) { 1743 printf("%s: RX fragment count(%d) " 1744 "and packet size(%d) mismatch\n", 1745 sc->sc_dev.dv_xname, nsegs, pktlen); 1746 break; 1747 } 1748 1749 /* Received a frame. */ 1750 jme_rxpkt(sc); 1751 prog++; 1752 } 1753 1754 if (prog > 0) { 1755 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0, 1756 sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1757 } 1758 } 1759 1760 void 1761 jme_tick(void *xsc) 1762 { 1763 struct jme_softc *sc = xsc; 1764 struct mii_data *mii = &sc->sc_miibus; 1765 int s; 1766 1767 s = splnet(); 1768 mii_tick(mii); 1769 timeout_add_sec(&sc->jme_tick_ch, 1); 1770 splx(s); 1771 } 1772 1773 void 1774 jme_reset(struct jme_softc *sc) 1775 { 1776 #ifdef foo 1777 /* Stop receiver, transmitter. */ 1778 jme_stop_rx(sc); 1779 jme_stop_tx(sc); 1780 #endif 1781 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 1782 DELAY(10); 1783 CSR_WRITE_4(sc, JME_GHC, 0); 1784 } 1785 1786 int 1787 jme_init(struct ifnet *ifp) 1788 { 1789 struct jme_softc *sc = ifp->if_softc; 1790 struct mii_data *mii; 1791 uint8_t eaddr[ETHER_ADDR_LEN]; 1792 bus_addr_t paddr; 1793 uint32_t reg; 1794 int error; 1795 1796 /* 1797 * Cancel any pending I/O. 1798 */ 1799 jme_stop(sc); 1800 1801 /* 1802 * Reset the chip to a known state.
1803 */ 1804 jme_reset(sc); 1805 1806 /* Init descriptors. */ 1807 error = jme_init_rx_ring(sc); 1808 if (error != 0) { 1809 printf("%s: initialization failed: no memory for Rx buffers.\n", 1810 sc->sc_dev.dv_xname); 1811 jme_stop(sc); 1812 return (error); 1813 } 1814 jme_init_tx_ring(sc); 1815 1816 /* Initialize shadow status block. */ 1817 jme_init_ssb(sc); 1818 1819 /* Reprogram the station address. */ 1820 bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN); 1821 CSR_WRITE_4(sc, JME_PAR0, 1822 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 1823 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 1824 1825 /* 1826 * Configure Tx queue. 1827 * Tx priority queue weight value : 0 1828 * Tx FIFO threshold for processing next packet : 16QW 1829 * Maximum Tx DMA length : 512 1830 * Allow Tx DMA burst. 1831 */ 1832 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 1833 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 1834 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 1835 sc->jme_txcsr |= sc->jme_tx_dma_size; 1836 sc->jme_txcsr |= TXCSR_DMA_BURST; 1837 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 1838 1839 /* Set Tx descriptor counter. */ 1840 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT); 1841 1842 /* Set Tx ring address to the hardware. */ 1843 paddr = JME_TX_RING_ADDR(sc, 0); 1844 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 1845 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 1846 1847 /* Configure TxMAC parameters. */ 1848 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; 1849 reg |= TXMAC_THRESH_1_PKT; 1850 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; 1851 CSR_WRITE_4(sc, JME_TXMAC, reg); 1852 1853 /* 1854 * Configure Rx queue. 1855 * FIFO full threshold for transmitting Tx pause packet : 128T 1856 * FIFO threshold for processing next packet : 128QW 1857 * Rx queue 0 select 1858 * Max Rx DMA length : 128 1859 * Rx descriptor retry : 32 1860 * Rx descriptor retry time gap : 256ns 1861 * Don't receive runt/bad frame. 1862 */ 1863 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; 1864 1865 /* 1866 * Since Rx FIFO size is 4K bytes, receiving frames larger 1867 * than 4K bytes will suffer from Rx FIFO overruns. So 1868 * decrease FIFO threshold to reduce the FIFO overruns for 1869 * frames larger than 4000 bytes. 1870 * For best performance of standard MTU sized frames use 1871 * maximum allowable FIFO threshold, which is 32QW for 1872 * chips with a full mask >= 2 otherwise 128QW. FIFO 1873 * thresholds of 64QW and 128QW are not valid for chips 1874 * with a full mask >= 2. 1875 */ 1876 if (sc->jme_revfm >= 2) 1877 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 1878 else { 1879 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 1880 ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE) 1881 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; 1882 else 1883 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; 1884 } 1885 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0); 1886 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); 1887 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; 1888 /* XXX TODO DROP_BAD */ 1889 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr); 1890 1891 /* Set Rx descriptor counter. */ 1892 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT); 1893 1894 /* Set Rx ring address to the hardware. */ 1895 paddr = JME_RX_RING_ADDR(sc, 0); 1896 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 1897 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 1898 1899 /* Clear receive filter. */ 1900 CSR_WRITE_4(sc, JME_RXMAC, 0); 1901 1902 /* Set up the receive filter. 
*/ 1903 jme_iff(sc); 1904 1905 jme_set_vlan(sc); 1906 1907 /* 1908 * Disable all WOL bits as WOL can interfere with normal Rx 1909 * operation. Also clear WOL detection status bits. 1910 */ 1911 reg = CSR_READ_4(sc, JME_PMCS); 1912 reg &= ~PMCS_WOL_ENB_MASK; 1913 CSR_WRITE_4(sc, JME_PMCS, reg); 1914 1915 /* 1916 * Pad 10 bytes right before the received frame. This greatly 1917 * helps Rx performance on strict-alignment architectures as 1918 * the driver does not need to copy the frame to align the payload. 1919 */ 1920 reg = CSR_READ_4(sc, JME_RXMAC); 1921 reg |= RXMAC_PAD_10BYTES; 1922 reg |= RXMAC_CSUM_ENB; 1923 CSR_WRITE_4(sc, JME_RXMAC, reg); 1924 1925 /* Configure general purpose reg0 */ 1926 reg = CSR_READ_4(sc, JME_GPREG0); 1927 reg &= ~GPREG0_PCC_UNIT_MASK; 1928 /* Set PCC timer resolution to micro-seconds unit. */ 1929 reg |= GPREG0_PCC_UNIT_US; 1930 /* 1931 * Disable all shadow register posting as we have to read 1932 * JME_INTR_STATUS register in jme_intr. Also it seems 1933 * that it's hard to synchronize interrupt status between 1934 * hardware and software with shadow posting due to 1935 * requirements of bus_dmamap_sync(9). 1936 */ 1937 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 1938 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 1939 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 1940 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 1941 /* Disable posting of DW0. */ 1942 reg &= ~GPREG0_POST_DW0_ENB; 1943 /* Clear PME message. */ 1944 reg &= ~GPREG0_PME_ENB; 1945 /* Set PHY address. */ 1946 reg &= ~GPREG0_PHY_ADDR_MASK; 1947 reg |= sc->jme_phyaddr; 1948 CSR_WRITE_4(sc, JME_GPREG0, reg); 1949 1950 /* Configure Tx queue 0 packet completion coalescing. */ 1951 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 1952 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 1953 PCCTX_COAL_TO_MASK; 1954 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 1955 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 1956 PCCTX_COAL_PKT_MASK; 1957 reg |= PCCTX_COAL_TXQ0; 1958 CSR_WRITE_4(sc, JME_PCCTX, reg); 1959 1960 /* Configure Rx queue 0 packet completion coalescing. */ 1961 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 1962 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 1963 PCCRX_COAL_TO_MASK; 1964 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 1965 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 1966 PCCRX_COAL_PKT_MASK; 1967 CSR_WRITE_4(sc, JME_PCCRX0, reg); 1968 1969 /* Configure shadow status block but don't enable posting. */ 1970 paddr = sc->jme_rdata.jme_ssb_block_paddr; 1971 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 1972 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 1973 1974 /* Disable Timer 1 and Timer 2. */ 1975 CSR_WRITE_4(sc, JME_TIMER1, 0); 1976 CSR_WRITE_4(sc, JME_TIMER2, 0); 1977 1978 /* Configure retry transmit period, retry limit value. */ 1979 CSR_WRITE_4(sc, JME_TXTRHD, 1980 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 1981 TXTRHD_RT_PERIOD_MASK) | 1982 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 1983 TXTRHD_RT_LIMIT_MASK)); 1984 1985 /* Disable RSS. */ 1986 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 1987 1988 /* Initialize the interrupt mask. */ 1989 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 1990 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 1991 1992 /* 1993 * Enabling Tx/Rx DMA engines and Rx queue processing is 1994 * done after detection of valid link in jme_miibus_statchg. 1995 */ 1996 sc->jme_flags &= ~JME_FLAG_LINK; 1997 1998 /* Set the current media.
	/* Configure shadow status block, but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period and retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished Rx segments.
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free Rx and Tx mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping transmitter timeout!\n",
		    sc->sc_dev.dv_xname);
}

void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
}
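
#ifdef notyet
/*
 * Illustrative sketch only: jme_stop_tx() and jme_stop_rx() above
 * share a clear-and-poll pattern that could be factored into a
 * helper along these lines.  jme_csr_wait_clear() is hypothetical
 * and not part of this driver.
 */
int
jme_csr_wait_clear(struct jme_softc *sc, int reg, uint32_t bit)
{
	int i;

	/* Poll until the given bit reads back as zero or we time out. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return (0);
	}
	return (ETIMEDOUT);
}
#endif
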
void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
	    sc->jme_cdata.jme_rxtail == NULL &&
	    sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}
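
/*
 * Note: jme_newbuf() loads the replacement mbuf into the spare DMA
 * map first and swaps maps only after a successful load, so a failed
 * load leaves the old mbuf and its mapping in place and a transient
 * allocation failure never costs a live Rx ring slot.
 */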
int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature to avoid copying the entire frame just to align
	 * the IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

void
jme_iff(struct jme_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		else
			rxcfg |= RXMAC_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/*
		 * Set up the multicast address filter by passing all
		 * multicast addresses through a CRC generator, and then
		 * using the low-order 6 bits as an index into the 64-bit
		 * multicast hash table.  The high-order bits select the
		 * register, while the rest of the bits select the bit
		 * within the register.
		 */
		rxcfg |= RXMAC_MULTICAST;
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 least significant bits. */
			crc &= 0x3f;

			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
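
#ifdef notyet
/*
 * Illustrative sketch only: the hash computation from jme_iff()
 * above, factored out for a single address.  jme_mchash_set() is
 * hypothetical and not part of this driver.
 */
void
jme_mchash_set(uint32_t mchash[2], const uint8_t *addr)
{
	uint32_t crc;

	/* Big-endian CRC32 of the address; keep the low 6 bits. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN) & 0x3f;

	/* Bit 5 selects MAR0/MAR1; bits 0-4 select the bit within. */
	mchash[crc >> 5] |= 1 << (crc & 0x1f);
}
#endif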