/*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/

/*
 * Copyright (c) 2019 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_def_bps[] = {
	RTL8125_DEF_BPS
}, rtl8125_mac_cfg2_ephy[] = {
	RTL8125_MAC_CFG2_EPHY
}, rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_ephy[] = {
	RTL8125_MAC_CFG3_EPHY
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
};

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
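		/*
		 * Note: ASPM is a known source of hangs and lost
		 * interrupts on Realtek chips, so both ASPM states
		 * and clock power management are cleared here.
		 */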
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	status = RGE_READ_4(sc, RGE_ISR);
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
			return (0);
	}
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & RGE_INTRS) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
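		/*
		 * In SIM mode the only ack source is RGE_ISR_PCS_TIMEOUT,
		 * so completions are batched behind the RGE_TIMERINT timer.
		 */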
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
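	/*
	 * txq_descidx records the chain's final descriptor so that
	 * rge_txeof() can poll its OWN bit for completion.
	 */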
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ifp->if_hardmtu) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	uint16_t max_frame_size;
	int i;

	rge_stop(ifp);

	/* Set maximum frame size but don't let MTU be less than ETHERMTU. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	if (ifp->if_mtu < ETHERMTU)
		max_frame_size = ETHERMTU;
	else
		max_frame_size = ifp->if_mtu;

	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN + 1;

	if (max_frame_size > RGE_JUMBO_FRAMELEN)
		max_frame_size -= 1;

	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);

	/* Initialize RX descriptors list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->sc_dev.dv_xname);
		rge_stop(ifp);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
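	/* TX setup cannot fail; buffers are only mapped in rge_encap(). */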
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_IM(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
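	/*
	 * The raw MAC OCP settings above and below mirror Realtek's
	 * reference initialization and are not publicly documented.
	 */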
	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
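	/*
	 * Gigabit Lite is Realtek's automatic link-speed downgrade
	 * feature (assumption based on vendor driver naming); disable
	 * it so autonegotiation advertises only what is configured.
	 */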
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		anar |= ANAR_TX | ANAR_TX_FD;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		anar |= ANAR_10 | ANAR_10_FD;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
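	/*
	 * Usual bus_dma(9) sequence: bus_dmamem_map() wires the
	 * allocated segments into kernel VA and bus_dmamap_load()
	 * binds them to the DMA map for the device.
	 */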
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
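/*
 * Called from rge_rx_list_init() to fill the ring and from rge_rxeof()
 * to replace a consumed buffer.
 */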
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
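		/* POSTREAD makes the chip's OWN-bit update visible here. */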
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
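		/*
		 * Both IN_OK flags may be set at once; the stack only
		 * consults the flag matching the packet's protocol.
		 */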
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_prodidx = i;

	if_input(ifp, &ml);

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
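	/*
	 * Multicast filtering uses a 64-bit hash table indexed by the
	 * top 6 bits of the big-endian CRC32 of each address; see the
	 * loop below.
	 */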
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		/* Wait for the PHY power state to reach 3 (LAN on). */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
}

void
rge_phy_config(struct rge_softc *sc)
{
	uint16_t mcode_ver, val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	mcode_ver = rge_read_phy_ocp(sc, 0xa438);

	if (sc->rge_type == MAC_CFG2) {
		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
			    rtl8125_mac_cfg2_ephy[i].val);
		}

		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8600);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
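			/*
			 * 0xa436/0xa438 appear to act as an indirect
			 * address/data pair into the PHY's parameter
			 * space (assumed from Realtek conventions).
			 */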
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
		}

		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
		rge_write_phy_ocp(sc, 0xa436, 0x8102);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
		rge_write_phy_ocp(sc, 0xa436, 0x8105);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
		rge_write_phy_ocp(sc, 0xa436, 0x8100);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
		rge_write_phy_ocp(sc, 0xa436, 0x8104);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
		rge_write_phy_ocp(sc, 0xa436, 0x8106);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
		rge_write_phy_ocp(sc, 0xa436, 0x80df);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
		rge_write_phy_ocp(sc, 0xa436, 0x819f);
		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	} else {
		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
			    rtl8125_mac_cfg3_ephy[i].val);

		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
			/* Disable PHY config. */
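			/*
			 * Same MCU patch sequence as the MAC_CFG2 path,
			 * using the CFG3 firmware tables.
			 */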
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
		}

		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);

		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
		for (i = 0; i < 26; i++)
			rge_write_phy_ocp(sc, 0xa438, 0);
		rge_write_phy_ocp(sc, 0xa436, 0x8257);
		rge_write_phy_ocp(sc, 0xa438, 0x020f);
		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		rge_write_phy_ocp(sc, 0xa438, 0x7843);

		rge_patch_phy_mcu(sc, 1);
		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
			rge_write_phy_ocp(sc, 0xb890,
			    mac_cfg3_b88e_value[i + 1]);
		}
		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
		rge_patch_phy_mcu(sc, 0);

		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	}

	/* Disable EEE. */
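	/*
	 * Energy-Efficient Ethernet is presumably disabled for link
	 * stability; EEE has a history of negotiation problems on
	 * Realtek PHYs.
	 */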
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] << 8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
}

void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_def_bps); i++)
			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
			    rtl8125_def_bps[i].val);
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
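	/*
	 * CSI accesses go through the chip's indirect window into its
	 * own PCIe configuration space (see rge_write_csi() below).
	 */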
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	uint16_t val;
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
		DELAY(100);
		if (val == 0x0040)
			break;
	}
	if (i == 1000)
		printf("%s: timeout waiting to patch phy mcu\n",
		    sc->sc_dev.dv_xname);
}

void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
	sc->rge_timerintr = 0;
}

void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
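	/*
	 * The firmware parks the NIC in an out-of-band management
	 * state; it must be reclaimed before normal operation
	 * (assumed from the IS_OOB handshake below).
	 */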
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		/* Wait for the PHY power state to reach 2. */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp += val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	timeout_add_sec(&sc->sc_timeout, 1);
}

void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp);
	}
}