/*	$NetBSD: if_enet.c,v 1.33 2020/12/31 02:16:14 uwe Exp $	*/

/*
 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.33 2020/12/31 02:16:14 uwe Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/rndsource.h>

#include <lib/libkern/libkern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/imx/if_enetreg.h>
#include <arm/imx/if_enetvar.h>

#undef DEBUG_ENET
#undef ENET_EVENT_COUNTER

#define ENET_TICK		hz

#ifdef DEBUG_ENET
int enet_debug = 0;
# define DEVICE_DPRINTF(args...)	\
	do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0)
#else
# define DEVICE_DPRINTF(args...)
#endif


#define RXDESC_MAXBUFSIZE	0x07f0
				/* ENET does not work with buffers greater than 0x0800... */

#undef ENET_SUPPORT_JUMBO	/* jumbo frame support is unstable */
#ifdef ENET_SUPPORT_JUMBO
# define ENET_MAX_PKT_LEN	4034	/* MAX FIFO LEN */
#else
# define ENET_MAX_PKT_LEN	1522
#endif
#define ENET_DEFAULT_PKT_LEN	1522	/* including VLAN tag */
#define MTU2FRAMESIZE(n)	\
	((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN)


#define ENET_MAX_PKT_NSEGS	64

#define ENET_TX_NEXTIDX(idx)	\
	(((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1))
#define ENET_RX_NEXTIDX(idx)	\
	(((idx) >= (ENET_RX_RING_CNT - 1)) ? \
0 : ((idx) + 1)) 99 100 #define TXDESC_WRITEOUT(idx) \ 101 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 sizeof(struct enet_txdesc) * (idx), \ 103 sizeof(struct enet_txdesc), \ 104 BUS_DMASYNC_PREWRITE) 105 106 #define TXDESC_READIN(idx) \ 107 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 sizeof(struct enet_txdesc) * (idx), \ 109 sizeof(struct enet_txdesc), \ 110 BUS_DMASYNC_PREREAD) 111 112 #define RXDESC_WRITEOUT(idx) \ 113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 sizeof(struct enet_rxdesc) * (idx), \ 115 sizeof(struct enet_rxdesc), \ 116 BUS_DMASYNC_PREWRITE) 117 118 #define RXDESC_READIN(idx) \ 119 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 sizeof(struct enet_rxdesc) * (idx), \ 121 sizeof(struct enet_rxdesc), \ 122 BUS_DMASYNC_PREREAD) 123 124 #define ENET_REG_READ(sc, reg) \ 125 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 127 #define ENET_REG_WRITE(sc, reg, value) \ 128 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 130 #ifdef ENET_EVENT_COUNTER 131 static void enet_attach_evcnt(struct enet_softc *); 132 static void enet_update_evcnt(struct enet_softc *); 133 #endif 134 135 static void enet_tick(void *); 136 static int enet_tx_intr(void *); 137 static int enet_rx_intr(void *); 138 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 139 int); 140 141 static void enet_start(struct ifnet *); 142 static int enet_ifflags_cb(struct ethercom *); 143 static int enet_ioctl(struct ifnet *, u_long, void *); 144 static int enet_init(struct ifnet *); 145 static void enet_stop(struct ifnet *, int); 146 static void enet_watchdog(struct ifnet *); 147 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 148 149 static int enet_miibus_readreg(device_t, int, int, uint16_t *); 150 static int enet_miibus_writereg(device_t, int, int, uint16_t); 151 static void enet_miibus_statchg(struct ifnet *); 152 153 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 154 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 155 static void enet_setmulti(struct enet_softc *); 156 static int enet_encap_mbufalign(struct mbuf **); 157 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 158 static int enet_init_regs(struct enet_softc *, int); 159 static int enet_alloc_ring(struct enet_softc *); 160 static void enet_init_txring(struct enet_softc *); 161 static int enet_init_rxring(struct enet_softc *); 162 static void enet_reset_rxdesc(struct enet_softc *, int); 163 static int enet_alloc_rxbuf(struct enet_softc *, int); 164 static void enet_drain_txbuf(struct enet_softc *); 165 static void enet_drain_rxbuf(struct enet_softc *); 166 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 167 bus_dmamap_t *); 168 169 int 170 enet_attach_common(device_t self) 171 { 172 struct enet_softc *sc = device_private(self); 173 struct ifnet *ifp; 174 struct mii_data * const mii = &sc->sc_mii; 175 176 /* allocate dma buffer */ 177 if (enet_alloc_ring(sc)) 178 return -1; 179 180 #define IS_ENADDR_ZERO(enaddr) \ 181 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 182 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 183 184 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 185 /* by any chance, mac-address is already set by bootloader? */ 186 enet_gethwaddr(sc, sc->sc_enaddr); 187 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 188 /* give up. 
set randomly */ 189 uint32_t eaddr = random(); 190 /* not multicast */ 191 sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc; 192 sc->sc_enaddr[1] = eaddr >> 16; 193 sc->sc_enaddr[2] = eaddr >> 8; 194 sc->sc_enaddr[3] = eaddr; 195 eaddr = random(); 196 sc->sc_enaddr[4] = eaddr >> 8; 197 sc->sc_enaddr[5] = eaddr; 198 199 aprint_error_dev(self, 200 "cannot get mac address. set randomly\n"); 201 } 202 } 203 enet_sethwaddr(sc, sc->sc_enaddr); 204 205 aprint_normal_dev(self, "Ethernet address %s\n", 206 ether_sprintf(sc->sc_enaddr)); 207 208 enet_init_regs(sc, 1); 209 210 /* callout will be scheduled from enet_init() */ 211 callout_init(&sc->sc_tick_ch, 0); 212 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 213 214 /* setup ifp */ 215 ifp = &sc->sc_ethercom.ec_if; 216 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 217 ifp->if_softc = sc; 218 ifp->if_mtu = ETHERMTU; 219 ifp->if_baudrate = IF_Gbps(1); 220 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 221 ifp->if_ioctl = enet_ioctl; 222 ifp->if_start = enet_start; 223 ifp->if_init = enet_init; 224 ifp->if_stop = enet_stop; 225 ifp->if_watchdog = enet_watchdog; 226 227 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 228 #ifdef ENET_SUPPORT_JUMBO 229 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 230 #endif 231 232 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 233 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 234 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 235 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 236 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 237 238 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN)); 239 IFQ_SET_READY(&ifp->if_snd); 240 241 /* setup MII */ 242 sc->sc_ethercom.ec_mii = mii; 243 mii->mii_ifp = ifp; 244 mii->mii_readreg = enet_miibus_readreg; 245 mii->mii_writereg = enet_miibus_writereg; 246 mii->mii_statchg = enet_miibus_statchg; 247 ifmedia_init(&mii->mii_media, 0, ether_mediachange, enet_mediastatus); 248 249 /* try to attach PHY */ 250 mii_attach(self, mii, 0xffffffff, sc->sc_phyid, MII_OFFSET_ANY, 0); 251 if (LIST_FIRST(&mii->mii_phys) == NULL) { 252 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 253 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 254 } else { 255 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 256 } 257 258 if_attach(ifp); 259 ether_ifattach(ifp, sc->sc_enaddr); 260 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 261 262 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 263 RND_TYPE_NET, RND_FLAG_DEFAULT); 264 265 #ifdef ENET_EVENT_COUNTER 266 enet_attach_evcnt(sc); 267 #endif 268 269 sc->sc_stopping = false; 270 271 return 0; 272 } 273 274 #ifdef ENET_EVENT_COUNTER 275 static void 276 enet_attach_evcnt(struct enet_softc *sc) 277 { 278 const char *xname; 279 280 xname = device_xname(sc->sc_dev); 281 282 #define ENET_EVCNT_ATTACH(name) \ 283 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 284 NULL, xname, #name); 285 286 ENET_EVCNT_ATTACH(t_drop); 287 ENET_EVCNT_ATTACH(t_packets); 288 ENET_EVCNT_ATTACH(t_bc_pkt); 289 ENET_EVCNT_ATTACH(t_mc_pkt); 290 ENET_EVCNT_ATTACH(t_crc_align); 291 ENET_EVCNT_ATTACH(t_undersize); 292 ENET_EVCNT_ATTACH(t_oversize); 293 ENET_EVCNT_ATTACH(t_frag); 294 ENET_EVCNT_ATTACH(t_jab); 295 ENET_EVCNT_ATTACH(t_col); 296 ENET_EVCNT_ATTACH(t_p64); 297 ENET_EVCNT_ATTACH(t_p65to127n); 298 ENET_EVCNT_ATTACH(t_p128to255n); 299 ENET_EVCNT_ATTACH(t_p256to511); 300 ENET_EVCNT_ATTACH(t_p512to1023); 301 ENET_EVCNT_ATTACH(t_p1024to2047); 302 
ENET_EVCNT_ATTACH(t_p_gte2048); 303 ENET_EVCNT_ATTACH(t_octets); 304 ENET_EVCNT_ATTACH(r_packets); 305 ENET_EVCNT_ATTACH(r_bc_pkt); 306 ENET_EVCNT_ATTACH(r_mc_pkt); 307 ENET_EVCNT_ATTACH(r_crc_align); 308 ENET_EVCNT_ATTACH(r_undersize); 309 ENET_EVCNT_ATTACH(r_oversize); 310 ENET_EVCNT_ATTACH(r_frag); 311 ENET_EVCNT_ATTACH(r_jab); 312 ENET_EVCNT_ATTACH(r_p64); 313 ENET_EVCNT_ATTACH(r_p65to127); 314 ENET_EVCNT_ATTACH(r_p128to255); 315 ENET_EVCNT_ATTACH(r_p256to511); 316 ENET_EVCNT_ATTACH(r_p512to1023); 317 ENET_EVCNT_ATTACH(r_p1024to2047); 318 ENET_EVCNT_ATTACH(r_p_gte2048); 319 ENET_EVCNT_ATTACH(r_octets); 320 } 321 322 static void 323 enet_update_evcnt(struct enet_softc *sc) 324 { 325 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 326 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 327 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 328 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 329 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 330 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 331 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 332 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 333 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 334 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 335 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 336 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 337 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 338 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 339 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 340 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 341 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 342 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 343 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 344 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 345 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 346 sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 347 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 348 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 349 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 350 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 351 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 352 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 353 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 354 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 355 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 356 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 357 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 358 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 359 } 360 #endif /* ENET_EVENT_COUNTER */ 361 362 static void 363 enet_tick(void *arg) 364 { 365 struct enet_softc *sc; 366 struct mii_data *mii; 367 struct ifnet *ifp; 368 int s; 369 370 sc = arg; 371 mii = &sc->sc_mii; 372 ifp = &sc->sc_ethercom.ec_if; 373 374 s = 
splnet(); 375 376 if (sc->sc_stopping) 377 goto out; 378 379 #ifdef ENET_EVENT_COUNTER 380 enet_update_evcnt(sc); 381 #endif 382 383 /* update counters */ 384 if_statadd(ifp, if_ierrors, 385 (uint64_t)ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE) + 386 (uint64_t)ENET_REG_READ(sc, ENET_RMON_R_FRAG) + 387 (uint64_t)ENET_REG_READ(sc, ENET_RMON_R_JAB)); 388 389 /* clear counters */ 390 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 391 ENET_REG_WRITE(sc, ENET_MIBC, 0); 392 393 mii_tick(mii); 394 out: 395 396 if (!sc->sc_stopping) 397 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 398 399 splx(s); 400 } 401 402 int 403 enet_intr(void *arg) 404 { 405 struct enet_softc *sc; 406 struct ifnet *ifp; 407 uint32_t status; 408 409 sc = arg; 410 status = ENET_REG_READ(sc, ENET_EIR); 411 412 if (sc->sc_imxtype == 7) { 413 if (status & (ENET_EIR_TXF | ENET_EIR_TXF1 | ENET_EIR_TXF2)) 414 enet_tx_intr(arg); 415 if (status & (ENET_EIR_RXF | ENET_EIR_RXF1 | ENET_EIR_RXF2)) 416 enet_rx_intr(arg); 417 } else { 418 if (status & ENET_EIR_TXF) 419 enet_tx_intr(arg); 420 if (status & ENET_EIR_RXF) 421 enet_rx_intr(arg); 422 } 423 424 if (status & ENET_EIR_EBERR) { 425 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 426 ifp = &sc->sc_ethercom.ec_if; 427 enet_stop(ifp, 1); 428 enet_init(ifp); 429 } else { 430 ENET_REG_WRITE(sc, ENET_EIR, status); 431 } 432 433 rnd_add_uint32(&sc->sc_rnd_source, status); 434 435 return 1; 436 } 437 438 static int 439 enet_tx_intr(void *arg) 440 { 441 struct enet_softc *sc; 442 struct ifnet *ifp; 443 struct enet_txsoft *txs; 444 int idx; 445 446 sc = (struct enet_softc *)arg; 447 ifp = &sc->sc_ethercom.ec_if; 448 449 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 450 idx = ENET_TX_NEXTIDX(idx)) { 451 452 txs = &sc->sc_txsoft[idx]; 453 454 TXDESC_READIN(idx); 455 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 456 /* This TX Descriptor has not been transmitted yet */ 457 break; 458 } 459 460 /* txsoft is available on first segment (TXFLAGS1_T1) */ 461 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 462 bus_dmamap_unload(sc->sc_dmat, 463 txs->txs_dmamap); 464 m_freem(txs->txs_mbuf); 465 if_statinc(ifp, if_opackets); 466 } 467 468 /* checking error */ 469 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 470 uint32_t flags2; 471 472 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 473 474 if (flags2 & (TXFLAGS2_TXE | 475 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 476 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 477 #ifdef DEBUG_ENET 478 if (enet_debug) { 479 char flagsbuf[128]; 480 481 snprintb(flagsbuf, sizeof(flagsbuf), 482 "\20" "\20TRANSMIT" "\16UNDERFLOW" 483 "\15COLLISION" "\14FRAME" 484 "\13LATECOLLISION" "\12OVERFLOW", 485 flags2); 486 487 device_printf(sc->sc_dev, 488 "txdesc[%d]: transmit error: " 489 "flags2=%s\n", idx, flagsbuf); 490 } 491 #endif /* DEBUG_ENET */ 492 if_statinc(ifp, if_oerrors); 493 } 494 } 495 496 sc->sc_tx_free++; 497 } 498 sc->sc_tx_considx = idx; 499 500 if (sc->sc_tx_free > 0) 501 ifp->if_flags &= ~IFF_OACTIVE; 502 503 /* 504 * No more pending TX descriptor, 505 * cancel the watchdog timer. 
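	 * (sc_tx_free == ENET_TX_RING_CNT means every descriptor has been reclaimed)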
506 */ 507 if (sc->sc_tx_free == ENET_TX_RING_CNT) 508 ifp->if_timer = 0; 509 510 return 1; 511 } 512 513 static int 514 enet_rx_intr(void *arg) 515 { 516 struct enet_softc *sc; 517 struct ifnet *ifp; 518 struct enet_rxsoft *rxs; 519 int idx, len, amount; 520 uint32_t flags1, flags2; 521 struct mbuf *m, *m0, *mprev; 522 523 sc = arg; 524 ifp = &sc->sc_ethercom.ec_if; 525 526 m0 = mprev = NULL; 527 amount = 0; 528 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 529 530 rxs = &sc->sc_rxsoft[idx]; 531 532 RXDESC_READIN(idx); 533 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 534 /* This RX Descriptor has not been received yet */ 535 break; 536 } 537 538 /* 539 * build mbuf from RX Descriptor if needed 540 */ 541 m = rxs->rxs_mbuf; 542 rxs->rxs_mbuf = NULL; 543 544 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 545 len = RXFLAGS1_LEN(flags1); 546 547 #define RACC_SHIFT16 2 548 if (m0 == NULL) { 549 m0 = m; 550 m_adj(m0, RACC_SHIFT16); 551 len -= RACC_SHIFT16; 552 m->m_len = len; 553 amount = len; 554 } else { 555 if (flags1 & RXFLAGS1_L) 556 len = len - amount - RACC_SHIFT16; 557 558 m->m_len = len; 559 amount += len; 560 if (m->m_flags & M_PKTHDR) 561 m_remove_pkthdr(m); 562 mprev->m_next = m; 563 } 564 mprev = m; 565 566 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 567 568 if (flags1 & RXFLAGS1_L) { 569 /* last buffer */ 570 if ((amount < ETHER_HDR_LEN) || 571 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 572 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 573 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 574 RXFLAGS2_CE)))) { 575 576 #ifdef DEBUG_ENET 577 if (enet_debug) { 578 char flags1buf[128], flags2buf[128]; 579 snprintb(flags1buf, sizeof(flags1buf), 580 "\20" "\31MISS" "\26LENGTHVIOLATION" 581 "\25NONOCTET" "\23CRC" "\22OVERRUN" 582 "\21TRUNCATED", flags1); 583 snprintb(flags2buf, sizeof(flags2buf), 584 "\20" "\40MAC" "\33PHY" 585 "\32COLLISION", flags2); 586 587 DEVICE_DPRINTF( 588 "rxdesc[%d]: receive error: " 589 "flags1=%s,flags2=%s,len=%d\n", 590 idx, flags1buf, flags2buf, amount); 591 } 592 #endif /* DEBUG_ENET */ 593 if_statinc(ifp, if_ierrors); 594 m_freem(m0); 595 596 } else { 597 /* packet receive ok */ 598 m_set_rcvif(m0, ifp); 599 m0->m_pkthdr.len = amount; 600 601 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 602 rxs->rxs_dmamap->dm_mapsize, 603 BUS_DMASYNC_PREREAD); 604 605 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 606 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 607 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 608 enet_rx_csum(sc, ifp, m0, idx); 609 610 if_percpuq_enqueue(ifp->if_percpuq, m0); 611 } 612 613 m0 = NULL; 614 mprev = NULL; 615 amount = 0; 616 617 } else { 618 /* continued from previous buffer */ 619 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 620 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 621 } 622 623 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 624 if (enet_alloc_rxbuf(sc, idx) != 0) { 625 panic("enet_alloc_rxbuf NULL\n"); 626 } 627 } 628 sc->sc_rx_readidx = idx; 629 630 /* re-enable RX DMA to make sure */ 631 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 632 633 return 1; 634 } 635 636 static void 637 enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx) 638 { 639 uint32_t flags2; 640 uint8_t proto; 641 642 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 643 644 if (flags2 & RXFLAGS2_IPV6) { 645 proto = sc->sc_rxdesc_ring[idx].rx_proto; 646 647 /* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */ 648 if ((proto == IPPROTO_TCP) && 649 (ifp->if_csum_flags_rx & M_CSUM_TCPv6)) 650 m->m_pkthdr.csum_flags |= M_CSUM_TCPv6; 651 else if 
		    ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: verify this really is an IPv4 packet */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid only when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid only when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol checksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashidx;
	uint32_t gaddr[2];

	if (ifp->if_flags & IFF_PROMISC) {
		/* receive all unicast packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
		/* receive all multicast packets */
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				/*
				 * if specified as a range, give up setting
				 * the hash and fall back to allmulti.
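				 * (the 64-bit GAUR/GALR group hash filter
				 * cannot express an address range)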
738 */ 739 gaddr[0] = gaddr[1] = 0xffffffff; 740 break; 741 } 742 743 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 744 hashidx = __SHIFTOUT(crc, __BITS(30,26)); 745 gaddr[__SHIFTOUT(crc, __BIT(31))] |= __BIT(hashidx); 746 747 ETHER_NEXT_MULTI(step, enm); 748 } 749 ETHER_UNLOCK(ec); 750 751 /* dont't receive any unicast packet (except own address) */ 752 ENET_REG_WRITE(sc, ENET_IAUR, 0); 753 ENET_REG_WRITE(sc, ENET_IALR, 0); 754 } 755 756 if (gaddr[0] == 0xffffffff && gaddr[1] == 0xffffffff) 757 ifp->if_flags |= IFF_ALLMULTI; 758 else 759 ifp->if_flags &= ~IFF_ALLMULTI; 760 761 /* receive multicast packets according to multicast filter */ 762 ENET_REG_WRITE(sc, ENET_GAUR, gaddr[1]); 763 ENET_REG_WRITE(sc, ENET_GALR, gaddr[0]); 764 765 } 766 767 static void 768 enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 769 { 770 uint32_t paddr; 771 772 paddr = ENET_REG_READ(sc, ENET_PALR); 773 hwaddr[0] = paddr >> 24; 774 hwaddr[1] = paddr >> 16; 775 hwaddr[2] = paddr >> 8; 776 hwaddr[3] = paddr; 777 778 paddr = ENET_REG_READ(sc, ENET_PAUR); 779 hwaddr[4] = paddr >> 24; 780 hwaddr[5] = paddr >> 16; 781 } 782 783 static void 784 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 785 { 786 uint32_t paddr; 787 788 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 789 hwaddr[3]; 790 ENET_REG_WRITE(sc, ENET_PALR, paddr); 791 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 792 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 793 } 794 795 /* 796 * ifnet interfaces 797 */ 798 static int 799 enet_init(struct ifnet *ifp) 800 { 801 struct enet_softc *sc; 802 int s, error; 803 804 sc = ifp->if_softc; 805 806 s = splnet(); 807 808 enet_init_regs(sc, 0); 809 enet_init_txring(sc); 810 error = enet_init_rxring(sc); 811 if (error != 0) { 812 enet_drain_rxbuf(sc); 813 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 814 goto init_failure; 815 } 816 817 /* reload mac address */ 818 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 819 enet_sethwaddr(sc, sc->sc_enaddr); 820 821 /* program multicast address */ 822 enet_setmulti(sc); 823 824 /* update if_flags */ 825 ifp->if_flags |= IFF_RUNNING; 826 ifp->if_flags &= ~IFF_OACTIVE; 827 828 /* update local copy of if_flags */ 829 sc->sc_if_flags = ifp->if_flags; 830 831 /* mii */ 832 mii_mediachg(&sc->sc_mii); 833 834 /* enable RX DMA */ 835 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 836 837 sc->sc_stopping = false; 838 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 839 840 init_failure: 841 splx(s); 842 843 return error; 844 } 845 846 static void 847 enet_start(struct ifnet *ifp) 848 { 849 struct enet_softc *sc; 850 struct mbuf *m; 851 int npkt; 852 853 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 854 return; 855 856 sc = ifp->if_softc; 857 for (npkt = 0; ; npkt++) { 858 IFQ_POLL(&ifp->if_snd, m); 859 if (m == NULL) 860 break; 861 862 if (sc->sc_tx_free <= 0) { 863 /* no tx descriptor now... */ 864 ifp->if_flags |= IFF_OACTIVE; 865 DEVICE_DPRINTF("TX descriptor is full\n"); 866 break; 867 } 868 869 IFQ_DEQUEUE(&ifp->if_snd, m); 870 871 if (enet_encap_txring(sc, &m) != 0) { 872 /* too many mbuf chains? */ 873 ifp->if_flags |= IFF_OACTIVE; 874 DEVICE_DPRINTF( 875 "TX descriptor is full. 
dropping packet\n"); 876 m_freem(m); 877 if_statinc(ifp, if_oerrors); 878 break; 879 } 880 881 /* Pass the packet to any BPF listeners */ 882 bpf_mtap(ifp, m, BPF_D_OUT); 883 } 884 885 if (npkt) { 886 /* enable TX DMA */ 887 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 888 889 ifp->if_timer = 5; 890 } 891 } 892 893 static void 894 enet_stop(struct ifnet *ifp, int disable) 895 { 896 struct enet_softc *sc; 897 int s; 898 uint32_t v; 899 900 sc = ifp->if_softc; 901 902 s = splnet(); 903 904 sc->sc_stopping = true; 905 callout_stop(&sc->sc_tick_ch); 906 907 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 908 v = ENET_REG_READ(sc, ENET_ECR); 909 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 910 911 /* Mark the interface as down and cancel the watchdog timer. */ 912 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 913 ifp->if_timer = 0; 914 915 if (disable) { 916 enet_drain_txbuf(sc); 917 enet_drain_rxbuf(sc); 918 } 919 920 splx(s); 921 } 922 923 static void 924 enet_watchdog(struct ifnet *ifp) 925 { 926 struct enet_softc *sc; 927 int s; 928 929 sc = ifp->if_softc; 930 s = splnet(); 931 932 device_printf(sc->sc_dev, "watchdog timeout\n"); 933 if_statinc(ifp, if_oerrors); 934 935 /* salvage packets left in descriptors */ 936 enet_tx_intr(sc); 937 enet_rx_intr(sc); 938 939 /* reset */ 940 enet_stop(ifp, 1); 941 enet_init(ifp); 942 943 splx(s); 944 } 945 946 static void 947 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 948 { 949 struct enet_softc *sc = ifp->if_softc; 950 951 ether_mediastatus(ifp, ifmr); 952 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 953 | sc->sc_flowflags; 954 } 955 956 static int 957 enet_ifflags_cb(struct ethercom *ec) 958 { 959 struct ifnet *ifp = &ec->ec_if; 960 struct enet_softc *sc = ifp->if_softc; 961 u_short change = ifp->if_flags ^ sc->sc_if_flags; 962 963 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 964 return ENETRESET; 965 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 966 return 0; 967 968 enet_setmulti(sc); 969 970 sc->sc_if_flags = ifp->if_flags; 971 return 0; 972 } 973 974 static int 975 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 976 { 977 struct enet_softc *sc; 978 struct ifreq *ifr; 979 int s, error; 980 uint32_t v; 981 982 sc = ifp->if_softc; 983 ifr = data; 984 985 error = 0; 986 987 s = splnet(); 988 989 switch (command) { 990 case SIOCSIFMTU: 991 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 992 error = EINVAL; 993 } else { 994 ifp->if_mtu = ifr->ifr_mtu; 995 996 /* set maximum frame length */ 997 v = MTU2FRAMESIZE(ifr->ifr_mtu); 998 ENET_REG_WRITE(sc, ENET_FTRL, v); 999 v = ENET_REG_READ(sc, ENET_RCR); 1000 v &= ~ENET_RCR_MAX_FL(0x3fff); 1001 v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 1002 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1003 ENET_REG_WRITE(sc, ENET_RCR, v); 1004 } 1005 break; 1006 case SIOCSIFMEDIA: 1007 /* Flow control requires full-duplex mode. */ 1008 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1009 (ifr->ifr_media & IFM_FDX) == 0) 1010 ifr->ifr_media &= ~IFM_ETH_FMASK; 1011 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1012 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1013 /* We can do both TXPAUSE and RXPAUSE. 
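				 * Advertise both directions when the generic
				 * IFM_FLOW flag is requested.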
*/ 1014 ifr->ifr_media |= 1015 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1016 } 1017 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1018 } 1019 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1020 break; 1021 default: 1022 error = ether_ioctl(ifp, command, data); 1023 if (error != ENETRESET) 1024 break; 1025 1026 /* post-process */ 1027 error = 0; 1028 switch (command) { 1029 case SIOCSIFCAP: 1030 error = (*ifp->if_init)(ifp); 1031 break; 1032 case SIOCADDMULTI: 1033 case SIOCDELMULTI: 1034 if (ifp->if_flags & IFF_RUNNING) 1035 enet_setmulti(sc); 1036 break; 1037 } 1038 break; 1039 } 1040 1041 splx(s); 1042 1043 return error; 1044 } 1045 1046 /* 1047 * for MII 1048 */ 1049 static int 1050 enet_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1051 { 1052 struct enet_softc *sc; 1053 int timeout; 1054 uint32_t status; 1055 1056 sc = device_private(dev); 1057 1058 /* clear MII update */ 1059 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1060 1061 /* read command */ 1062 ENET_REG_WRITE(sc, ENET_MMFR, 1063 ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA | 1064 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy)); 1065 1066 /* check MII update */ 1067 for (timeout = 5000; timeout > 0; --timeout) { 1068 status = ENET_REG_READ(sc, ENET_EIR); 1069 if (status & ENET_EIR_MII) 1070 break; 1071 } 1072 if (timeout <= 0) { 1073 DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n", 1074 reg); 1075 return ETIMEDOUT; 1076 } else 1077 *val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK; 1078 1079 return 0; 1080 } 1081 1082 static int 1083 enet_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1084 { 1085 struct enet_softc *sc; 1086 int timeout; 1087 1088 sc = device_private(dev); 1089 1090 /* clear MII update */ 1091 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1092 1093 /* write command */ 1094 ENET_REG_WRITE(sc, ENET_MMFR, 1095 ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA | 1096 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) | 1097 (ENET_MMFR_DATAMASK & val)); 1098 1099 /* check MII update */ 1100 for (timeout = 5000; timeout > 0; --timeout) { 1101 if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII) 1102 break; 1103 } 1104 if (timeout <= 0) { 1105 DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", reg); 1106 return ETIMEDOUT; 1107 } 1108 1109 return 0; 1110 } 1111 1112 static void 1113 enet_miibus_statchg(struct ifnet *ifp) 1114 { 1115 struct enet_softc *sc; 1116 struct mii_data *mii; 1117 struct ifmedia_entry *ife; 1118 uint32_t ecr, ecr0; 1119 uint32_t rcr, rcr0; 1120 uint32_t tcr, tcr0; 1121 1122 sc = ifp->if_softc; 1123 mii = &sc->sc_mii; 1124 ife = mii->mii_media.ifm_cur; 1125 1126 /* get current status */ 1127 ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET; 1128 rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR); 1129 tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR); 1130 1131 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1132 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 1133 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1134 mii->mii_media_active &= ~IFM_ETH_FMASK; 1135 } 1136 1137 if ((ife->ifm_media & IFM_FDX) != 0) { 1138 tcr |= ENET_TCR_FDEN; /* full duplex */ 1139 rcr &= ~ENET_RCR_DRT; /* enable receive on transmit */ 1140 } else { 1141 tcr &= ~ENET_TCR_FDEN; /* half duplex */ 1142 rcr |= ENET_RCR_DRT; /* disable receive on transmit */ 1143 } 1144 1145 if ((tcr ^ tcr0) & ENET_TCR_FDEN) { 1146 /* 1147 * need to reset because 1148 * FDEN can change when ECR[ETHEREN] is 0 1149 */ 1150 enet_init_regs(sc, 0); 1151 return; 
1152 } 1153 1154 switch (IFM_SUBTYPE(ife->ifm_media)) { 1155 case IFM_AUTO: 1156 case IFM_1000_T: 1157 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1158 rcr &= ~ENET_RCR_RMII_10T; 1159 break; 1160 case IFM_100_TX: 1161 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1162 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1163 break; 1164 case IFM_10_T: 1165 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1166 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1167 break; 1168 default: 1169 ecr = ecr0; 1170 rcr = rcr0; 1171 tcr = tcr0; 1172 break; 1173 } 1174 1175 if (sc->sc_rgmii == 0) 1176 ecr &= ~ENET_ECR_SPEED; 1177 1178 if (sc->sc_flowflags & IFM_FLOW) 1179 rcr |= ENET_RCR_FCE; 1180 else 1181 rcr &= ~ENET_RCR_FCE; 1182 1183 /* update registers if need change */ 1184 if (ecr != ecr0) 1185 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1186 if (rcr != rcr0) 1187 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1188 if (tcr != tcr0) 1189 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1190 } 1191 1192 /* 1193 * handling descriptors 1194 */ 1195 static void 1196 enet_init_txring(struct enet_softc *sc) 1197 { 1198 int i; 1199 1200 /* build TX ring */ 1201 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1202 sc->sc_txdesc_ring[i].tx_flags1_len = 1203 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1204 sc->sc_txdesc_ring[i].tx_databuf = 0; 1205 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1206 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1207 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1208 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1209 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1210 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1211 1212 TXDESC_WRITEOUT(i); 1213 } 1214 1215 sc->sc_tx_free = ENET_TX_RING_CNT; 1216 sc->sc_tx_considx = 0; 1217 sc->sc_tx_prodidx = 0; 1218 } 1219 1220 static int 1221 enet_init_rxring(struct enet_softc *sc) 1222 { 1223 int i, error; 1224 1225 /* build RX ring */ 1226 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1227 error = enet_alloc_rxbuf(sc, i); 1228 if (error != 0) 1229 return error; 1230 } 1231 1232 sc->sc_rx_readidx = 0; 1233 1234 return 0; 1235 } 1236 1237 static int 1238 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1239 { 1240 struct mbuf *m; 1241 int error; 1242 1243 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1244 1245 /* free mbuf if already allocated */ 1246 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1247 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1248 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1249 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1250 } 1251 1252 /* allocate new mbuf cluster */ 1253 MGETHDR(m, M_DONTWAIT, MT_DATA); 1254 if (m == NULL) 1255 return ENOBUFS; 1256 MCLGET(m, M_DONTWAIT); 1257 if (!(m->m_flags & M_EXT)) { 1258 m_freem(m); 1259 return ENOBUFS; 1260 } 1261 m->m_len = MCLBYTES; 1262 m->m_next = NULL; 1263 1264 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1265 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1266 BUS_DMA_READ | BUS_DMA_NOWAIT); 1267 if (error) { 1268 m_freem(m); 1269 return error; 1270 } 1271 1272 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1273 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1274 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1275 1276 sc->sc_rxsoft[idx].rxs_mbuf = m; 1277 enet_reset_rxdesc(sc, idx); 1278 return 0; 1279 } 1280 1281 static void 1282 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1283 { 1284 uint32_t paddr; 1285 1286 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1287 1288 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1289 RXFLAGS1_E | 1290 ((idx == (ENET_RX_RING_CNT - 1)) ? 
	    RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only for the first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			if_statinc(ifp, if_oerrors);
		}
		sc->sc_tx_free++;
	}
}

static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * a TX packet may be an mbuf chain of many segments, so create
	 * each DMA map with up to ENET_MAX_PKT_NSEGS segments.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * each RX descriptor holds a single mbuf cluster,
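	 * so a one-segment DMA map of MCLBYTES per descriptor is sufficient.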
1374 */ 1375 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1376 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1377 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1378 &sc->sc_rxsoft[i].rxs_dmamap); 1379 if (error) { 1380 aprint_error_dev(sc->sc_dev, 1381 "can't create DMA map for RX descs\n"); 1382 goto fail_2; 1383 } 1384 } 1385 1386 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1387 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1388 return -1; 1389 memset(sc->sc_txdesc_ring, 0, 1390 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1391 1392 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1393 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1394 return -1; 1395 memset(sc->sc_rxdesc_ring, 0, 1396 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1397 1398 return 0; 1399 1400 fail_2: 1401 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1402 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1403 bus_dmamap_destroy(sc->sc_dmat, 1404 sc->sc_rxsoft[i].rxs_dmamap); 1405 } 1406 fail_1: 1407 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1408 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1409 bus_dmamap_destroy(sc->sc_dmat, 1410 sc->sc_txsoft[i].txs_dmamap); 1411 } 1412 return error; 1413 } 1414 1415 static int 1416 enet_encap_mbufalign(struct mbuf **mp) 1417 { 1418 struct mbuf *m, *m0, *mt, *p, *x; 1419 void *ap; 1420 uint32_t alignoff, chiplen; 1421 1422 /* 1423 * iMX6 SoC ethernet controller requires 1424 * address of buffer must aligned 8, and 1425 * length of buffer must be greater than 10 (first fragment only?) 1426 */ 1427 #define ALIGNBYTE 8 1428 #define MINBUFSIZE 10 1429 #define ALIGN_PTR(p, align) \ 1430 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1431 1432 m0 = *mp; 1433 mt = p = NULL; 1434 for (m = m0; m != NULL; m = m->m_next) { 1435 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1436 if (m->m_len < (ALIGNBYTE * 2)) { 1437 /* 1438 * rearrange mbuf data aligned 1439 * 1440 * align 8 * * * * * 1441 * +0123456789abcdef0123456789abcdef0 1442 * FROM m->m_data[___________abcdefghijklmn_______] 1443 * 1444 * +0123456789abcdef0123456789abcdef0 1445 * TO m->m_data[________abcdefghijklm___________] or 1446 * m->m_data[________________abcdefghijklmn__] 1447 */ 1448 if ((alignoff != 0) && (m->m_len != 0)) { 1449 chiplen = ALIGNBYTE - alignoff; 1450 if (M_LEADINGSPACE(m) >= alignoff) { 1451 ap = m->m_data - alignoff; 1452 memmove(ap, m->m_data, m->m_len); 1453 m->m_data = ap; 1454 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1455 ap = m->m_data + chiplen; 1456 memmove(ap, m->m_data, m->m_len); 1457 m->m_data = ap; 1458 } else { 1459 /* 1460 * no space to align data. (M_READONLY?) 1461 * allocate new mbuf aligned, 1462 * and copy to it. 
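				 * (the new mbuf's m_data is first aligned
				 * to an 8-byte boundary)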
1463 */ 1464 MGET(x, M_DONTWAIT, m->m_type); 1465 if (x == NULL) { 1466 m_freem(m); 1467 return ENOBUFS; 1468 } 1469 MCLAIM(x, m->m_owner); 1470 if (m->m_flags & M_PKTHDR) 1471 m_move_pkthdr(x, m); 1472 x->m_len = m->m_len; 1473 x->m_data = ALIGN_PTR(x->m_data, 1474 ALIGNBYTE); 1475 memcpy(mtod(x, void *), mtod(m, void *), 1476 m->m_len); 1477 p->m_next = x; 1478 x->m_next = m_free(m); 1479 m = x; 1480 } 1481 } 1482 1483 /* 1484 * fill 1st mbuf at least 10byte 1485 * 1486 * align 8 * * * * * 1487 * +0123456789abcdef0123456789abcdef0 1488 * FROM m->m_data[________abcde___________________] 1489 * m->m_data[__fg____________________________] 1490 * m->m_data[_________________hi_____________] 1491 * m->m_data[__________jk____________________] 1492 * m->m_data[____l___________________________] 1493 * 1494 * +0123456789abcdef0123456789abcdef0 1495 * TO m->m_data[________abcdefghij______________] 1496 * m->m_data[________________________________] 1497 * m->m_data[________________________________] 1498 * m->m_data[___________k____________________] 1499 * m->m_data[____l___________________________] 1500 */ 1501 if (mt == NULL) { 1502 mt = m; 1503 while (mt->m_len == 0) { 1504 mt = mt->m_next; 1505 if (mt == NULL) { 1506 m_freem(m); 1507 return ENOBUFS; 1508 } 1509 } 1510 1511 /* mt = 1st mbuf, x = 2nd mbuf */ 1512 x = mt->m_next; 1513 while (mt->m_len < MINBUFSIZE) { 1514 if (x == NULL) { 1515 m_freem(m); 1516 return ENOBUFS; 1517 } 1518 1519 alignoff = (uintptr_t)x->m_data & 1520 (ALIGNBYTE - 1); 1521 chiplen = ALIGNBYTE - alignoff; 1522 if (chiplen > x->m_len) { 1523 chiplen = x->m_len; 1524 } else if ((mt->m_len + chiplen) < 1525 MINBUFSIZE) { 1526 /* 1527 * next mbuf should be greater 1528 * than ALIGNBYTE? 1529 */ 1530 if (x->m_len >= (chiplen + 1531 ALIGNBYTE * 2)) 1532 chiplen += ALIGNBYTE; 1533 else 1534 chiplen = x->m_len; 1535 } 1536 1537 if (chiplen && 1538 (M_TRAILINGSPACE(mt) < chiplen)) { 1539 /* 1540 * move data to the begining of 1541 * m_dat[] (aligned) to en- 1542 * large trailingspace 1543 */ 1544 ap = M_BUFADDR(mt); 1545 ap = ALIGN_PTR(ap, ALIGNBYTE); 1546 memcpy(ap, mt->m_data, 1547 mt->m_len); 1548 mt->m_data = ap; 1549 } 1550 1551 if (chiplen && 1552 (M_TRAILINGSPACE(mt) >= chiplen)) { 1553 memcpy(mt->m_data + mt->m_len, 1554 x->m_data, chiplen); 1555 mt->m_len += chiplen; 1556 m_adj(x, chiplen); 1557 } 1558 1559 x = x->m_next; 1560 } 1561 } 1562 1563 } else { 1564 mt = m; 1565 1566 /* 1567 * allocate new mbuf x, and rearrange as below; 1568 * 1569 * align 8 * * * * * 1570 * +0123456789abcdef0123456789abcdef0 1571 * FROM m->m_data[____________abcdefghijklmnopq___] 1572 * 1573 * +0123456789abcdef0123456789abcdef0 1574 * TO x->m_data[________abcdefghijkl____________] 1575 * m->m_data[________________________mnopq___] 1576 * 1577 */ 1578 if (alignoff != 0) { 1579 /* at least ALIGNBYTE */ 1580 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1581 1582 MGET(x, M_DONTWAIT, m->m_type); 1583 if (x == NULL) { 1584 m_freem(m); 1585 return ENOBUFS; 1586 } 1587 MCLAIM(x, m->m_owner); 1588 if (m->m_flags & M_PKTHDR) 1589 m_move_pkthdr(x, m); 1590 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1591 memcpy(mtod(x, void *), mtod(m, void *), 1592 chiplen); 1593 x->m_len = chiplen; 1594 x->m_next = m; 1595 m_adj(m, chiplen); 1596 1597 if (p == NULL) 1598 m0 = x; 1599 else 1600 p->m_next = x; 1601 } 1602 } 1603 p = m; 1604 } 1605 *mp = m0; 1606 1607 return 0; 1608 } 1609 1610 static int 1611 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1612 { 1613 bus_dmamap_t map; 1614 struct mbuf *m; 
1615 int csumflags, idx, i, error; 1616 uint32_t flags1, flags2; 1617 1618 idx = sc->sc_tx_prodidx; 1619 map = sc->sc_txsoft[idx].txs_dmamap; 1620 1621 /* align mbuf data for claim of ENET */ 1622 error = enet_encap_mbufalign(mp); 1623 if (error != 0) 1624 return error; 1625 1626 m = *mp; 1627 csumflags = m->m_pkthdr.csum_flags; 1628 1629 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1630 BUS_DMA_NOWAIT); 1631 if (error != 0) { 1632 device_printf(sc->sc_dev, 1633 "Error mapping mbuf into TX chain: error=%d\n", error); 1634 m_freem(m); 1635 return error; 1636 } 1637 1638 if (map->dm_nsegs > sc->sc_tx_free) { 1639 bus_dmamap_unload(sc->sc_dmat, map); 1640 device_printf(sc->sc_dev, 1641 "too many mbuf chain %d\n", map->dm_nsegs); 1642 m_freem(m); 1643 return ENOBUFS; 1644 } 1645 1646 /* fill protocol cksum zero beforehand */ 1647 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1648 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1649 int ehlen; 1650 uint16_t etype; 1651 1652 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1653 switch (ntohs(etype)) { 1654 case ETHERTYPE_IP: 1655 case ETHERTYPE_IPV6: 1656 ehlen = ETHER_HDR_LEN; 1657 break; 1658 case ETHERTYPE_VLAN: 1659 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1660 break; 1661 default: 1662 ehlen = 0; 1663 break; 1664 } 1665 1666 if (ehlen) { 1667 const int off = 1668 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1669 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data); 1670 if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) { 1671 uint16_t zero = 0; 1672 m_copyback(m, ehlen + off, sizeof(zero), &zero); 1673 } 1674 } 1675 } 1676 1677 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1678 BUS_DMASYNC_PREWRITE); 1679 1680 for (i = 0; i < map->dm_nsegs; i++) { 1681 flags1 = TXFLAGS1_R; 1682 flags2 = 0; 1683 1684 if (i == 0) { 1685 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1686 sc->sc_txsoft[idx].txs_mbuf = m; 1687 } 1688 1689 /* checksum offloading */ 1690 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1691 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1692 flags2 |= TXFLAGS2_PINS; 1693 if (csumflags & (M_CSUM_IPv4)) 1694 flags2 |= TXFLAGS2_IINS; 1695 1696 if (i == map->dm_nsegs - 1) { 1697 /* mark last segment */ 1698 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1699 flags2 |= TXFLAGS2_INT; 1700 } 1701 if (idx == ENET_TX_RING_CNT - 1) { 1702 /* mark end of ring */ 1703 flags1 |= TXFLAGS1_W; 1704 } 1705 1706 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1707 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1708 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1709 TXDESC_WRITEOUT(idx); 1710 1711 sc->sc_txdesc_ring[idx].tx_flags1_len = 1712 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1713 TXDESC_WRITEOUT(idx); 1714 1715 idx = ENET_TX_NEXTIDX(idx); 1716 sc->sc_tx_free--; 1717 } 1718 1719 sc->sc_tx_prodidx = idx; 1720 1721 return 0; 1722 } 1723 1724 /* 1725 * device initialize 1726 */ 1727 static int 1728 enet_init_regs(struct enet_softc *sc, int init) 1729 { 1730 struct mii_data *mii; 1731 struct ifmedia_entry *ife; 1732 paddr_t paddr; 1733 uint32_t val; 1734 int miimode, fulldup, ecr_speed, rcr_speed, flowctrl; 1735 1736 if (init) { 1737 fulldup = 1; 1738 ecr_speed = ENET_ECR_SPEED; 1739 rcr_speed = 0; 1740 flowctrl = 0; 1741 } else { 1742 mii = &sc->sc_mii; 1743 ife = mii->mii_media.ifm_cur; 1744 1745 if ((ife->ifm_media & IFM_FDX) != 0) 1746 fulldup = 1; 1747 else 1748 fulldup = 0; 1749 1750 switch (IFM_SUBTYPE(ife->ifm_media)) { 1751 case IFM_10_T: 1752 ecr_speed = 0; 1753 rcr_speed = ENET_RCR_RMII_10T; 1754 break; 1755 case IFM_100_TX: 1756 
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	if (sc->sc_rgmii == 0)
		ecr_speed = 0;

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupts */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable the MIB counters */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/* MII speed setup. MDCclk(=2.5MHz) = (internal module clock)/((val+1)*2) */
	val = (sc->sc_clock + (5000000 - 1)) / 5000000 - 1;
	ENET_REG_WRITE(sc, ENET_MSCR, __SHIFTIN(val, ENET_MSCR_MII_SPEED));

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksumming is off by default (overridden per TX descriptor) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align the ethernet payload on a 32-bit boundary, discard frames
	 * with MAC-layer errors, and don't discard frames with checksum errors
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */

	if (sc->sc_rgmii == 0)
		miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE;
	else
		miimode = ENET_RCR_RGMII_EN;
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* remove RX frame padding */
	    miimode |
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* enable flow control */
	    rcr_speed |
	    (fulldup ?
0 : ENET_RCR_DRT) | 1829 ENET_RCR_MAX_FL(val)); 1830 1831 /* Maximum Receive BufSize per one descriptor */ 1832 ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE); 1833 1834 1835 /* TX/RX Descriptor Physical Address */ 1836 paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr; 1837 ENET_REG_WRITE(sc, ENET_TDSR, paddr); 1838 paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr; 1839 ENET_REG_WRITE(sc, ENET_RDSR, paddr); 1840 /* sync cache */ 1841 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0, 1842 sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1843 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0, 1844 sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1845 1846 /* enable interrupts */ 1847 val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR; 1848 if (sc->sc_imxtype == 7) 1849 val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 | ENET_EIR_TXF1 | 1850 ENET_EIR_RXF1; 1851 ENET_REG_WRITE(sc, ENET_EIMR, val); 1852 1853 /* enable ether */ 1854 ENET_REG_WRITE(sc, ENET_ECR, 1855 #if _BYTE_ORDER == _LITTLE_ENDIAN 1856 ENET_ECR_DBSWP | 1857 #endif 1858 ecr_speed | 1859 ENET_ECR_EN1588 | /* use enhanced TX/RX descriptor */ 1860 ENET_ECR_ETHEREN); /* Ethernet Enable */ 1861 1862 return 0; 1863 } 1864 1865 static int 1866 enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp, 1867 bus_dmamap_t *mapp) 1868 { 1869 bus_dma_segment_t seglist[1]; 1870 int nsegs, error; 1871 1872 if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist, 1873 1, &nsegs, M_NOWAIT)) != 0) { 1874 device_printf(sc->sc_dev, 1875 "unable to allocate DMA buffer, error=%d\n", error); 1876 goto fail_alloc; 1877 } 1878 1879 if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp, 1880 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 1881 device_printf(sc->sc_dev, 1882 "unable to map DMA buffer, error=%d\n", 1883 error); 1884 goto fail_map; 1885 } 1886 1887 if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1888 BUS_DMA_NOWAIT, mapp)) != 0) { 1889 device_printf(sc->sc_dev, 1890 "unable to create DMA map, error=%d\n", error); 1891 goto fail_create; 1892 } 1893 1894 if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL, 1895 BUS_DMA_NOWAIT)) != 0) { 1896 aprint_error_dev(sc->sc_dev, 1897 "unable to load DMA map, error=%d\n", error); 1898 goto fail_load; 1899 } 1900 1901 return 0; 1902 1903 fail_load: 1904 bus_dmamap_destroy(sc->sc_dmat, *mapp); 1905 fail_create: 1906 bus_dmamem_unmap(sc->sc_dmat, *addrp, size); 1907 fail_map: 1908 bus_dmamem_free(sc->sc_dmat, seglist, 1); 1909 fail_alloc: 1910 return error; 1911 } 1912