/*	$NetBSD: if_enet.c,v 1.28 2019/11/12 05:09:29 hkenken Exp $	*/

/*
 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.28 2019/11/12 05:09:29 hkenken Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/rndsource.h>

#include <lib/libkern/libkern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/imx/if_enetreg.h>
#include <arm/imx/if_enetvar.h>

#undef DEBUG_ENET
#undef ENET_EVENT_COUNTER

#define ENET_TICK	hz

#ifdef DEBUG_ENET
int enet_debug = 0;
# define DEVICE_DPRINTF(args...)	\
	do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0)
#else
# define DEVICE_DPRINTF(args...)
#endif


#define RXDESC_MAXBUFSIZE	0x07f0
		/* ENET does not work greater than 0x0800... */

#undef ENET_SUPPORT_JUMBO	/* JUMBO FRAME SUPPORT is unstable */
#ifdef ENET_SUPPORT_JUMBO
# define ENET_MAX_PKT_LEN	4034	/* MAX FIFO LEN */
#else
# define ENET_MAX_PKT_LEN	1522
#endif
#define ENET_DEFAULT_PKT_LEN	1522	/* including VLAN tag */
#define MTU2FRAMESIZE(n)	\
	((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN)


#define ENET_MAX_PKT_NSEGS	64

#define ENET_TX_NEXTIDX(idx)	\
	(((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1))
#define ENET_RX_NEXTIDX(idx)	\
	(((idx) >= (ENET_RX_RING_CNT - 1)) ?
0 : ((idx) + 1)) 99 100 #define TXDESC_WRITEOUT(idx) \ 101 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 sizeof(struct enet_txdesc) * (idx), \ 103 sizeof(struct enet_txdesc), \ 104 BUS_DMASYNC_PREWRITE) 105 106 #define TXDESC_READIN(idx) \ 107 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 sizeof(struct enet_txdesc) * (idx), \ 109 sizeof(struct enet_txdesc), \ 110 BUS_DMASYNC_PREREAD) 111 112 #define RXDESC_WRITEOUT(idx) \ 113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 sizeof(struct enet_rxdesc) * (idx), \ 115 sizeof(struct enet_rxdesc), \ 116 BUS_DMASYNC_PREWRITE) 117 118 #define RXDESC_READIN(idx) \ 119 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 sizeof(struct enet_rxdesc) * (idx), \ 121 sizeof(struct enet_rxdesc), \ 122 BUS_DMASYNC_PREREAD) 123 124 #define ENET_REG_READ(sc, reg) \ 125 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 127 #define ENET_REG_WRITE(sc, reg, value) \ 128 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 130 #ifdef ENET_EVENT_COUNTER 131 static void enet_attach_evcnt(struct enet_softc *); 132 static void enet_update_evcnt(struct enet_softc *); 133 #endif 134 135 static void enet_tick(void *); 136 static int enet_tx_intr(void *); 137 static int enet_rx_intr(void *); 138 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 139 int); 140 141 static void enet_start(struct ifnet *); 142 static int enet_ifflags_cb(struct ethercom *); 143 static int enet_ioctl(struct ifnet *, u_long, void *); 144 static int enet_init(struct ifnet *); 145 static void enet_stop(struct ifnet *, int); 146 static void enet_watchdog(struct ifnet *); 147 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 148 149 static int enet_miibus_readreg(device_t, int, int, uint16_t *); 150 static int enet_miibus_writereg(device_t, int, int, uint16_t); 151 static void enet_miibus_statchg(struct ifnet *); 152 153 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 154 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 155 static void enet_setmulti(struct enet_softc *); 156 static int enet_encap_mbufalign(struct mbuf **); 157 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 158 static int enet_init_regs(struct enet_softc *, int); 159 static int enet_alloc_ring(struct enet_softc *); 160 static void enet_init_txring(struct enet_softc *); 161 static int enet_init_rxring(struct enet_softc *); 162 static void enet_reset_rxdesc(struct enet_softc *, int); 163 static int enet_alloc_rxbuf(struct enet_softc *, int); 164 static void enet_drain_txbuf(struct enet_softc *); 165 static void enet_drain_rxbuf(struct enet_softc *); 166 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 167 bus_dmamap_t *); 168 169 int 170 enet_attach_common(device_t self) 171 { 172 struct enet_softc *sc = device_private(self); 173 struct ifnet *ifp; 174 struct mii_data * const mii = &sc->sc_mii; 175 176 /* allocate dma buffer */ 177 if (enet_alloc_ring(sc)) 178 return -1; 179 180 #define IS_ENADDR_ZERO(enaddr) \ 181 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 182 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 183 184 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 185 /* by any chance, mac-address is already set by bootloader? */ 186 enet_gethwaddr(sc, sc->sc_enaddr); 187 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 188 /* give up. 
set randomly */ 189 uint32_t eaddr = random(); 190 /* not multicast */ 191 sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc; 192 sc->sc_enaddr[1] = eaddr >> 16; 193 sc->sc_enaddr[2] = eaddr >> 8; 194 sc->sc_enaddr[3] = eaddr; 195 eaddr = random(); 196 sc->sc_enaddr[4] = eaddr >> 8; 197 sc->sc_enaddr[5] = eaddr; 198 199 aprint_error_dev(self, 200 "cannot get mac address. set randomly\n"); 201 } 202 } 203 enet_sethwaddr(sc, sc->sc_enaddr); 204 205 aprint_normal_dev(self, "Ethernet address %s\n", 206 ether_sprintf(sc->sc_enaddr)); 207 208 enet_init_regs(sc, 1); 209 210 /* callout will be scheduled from enet_init() */ 211 callout_init(&sc->sc_tick_ch, 0); 212 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 213 214 /* setup ifp */ 215 ifp = &sc->sc_ethercom.ec_if; 216 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 217 ifp->if_softc = sc; 218 ifp->if_mtu = ETHERMTU; 219 ifp->if_baudrate = IF_Gbps(1); 220 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 221 ifp->if_ioctl = enet_ioctl; 222 ifp->if_start = enet_start; 223 ifp->if_init = enet_init; 224 ifp->if_stop = enet_stop; 225 ifp->if_watchdog = enet_watchdog; 226 227 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 228 #ifdef ENET_SUPPORT_JUMBO 229 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 230 #endif 231 232 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 233 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 234 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 235 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 236 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 237 238 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN)); 239 IFQ_SET_READY(&ifp->if_snd); 240 241 /* setup MII */ 242 sc->sc_ethercom.ec_mii = mii; 243 mii->mii_ifp = ifp; 244 mii->mii_readreg = enet_miibus_readreg; 245 mii->mii_writereg = enet_miibus_writereg; 246 mii->mii_statchg = enet_miibus_statchg; 247 ifmedia_init(&mii->mii_media, 0, ether_mediachange, enet_mediastatus); 248 249 /* try to attach PHY */ 250 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); 251 if (LIST_FIRST(&mii->mii_phys) == NULL) { 252 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 253 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 254 } else { 255 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 256 } 257 258 if_attach(ifp); 259 ether_ifattach(ifp, sc->sc_enaddr); 260 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 261 262 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 263 RND_TYPE_NET, RND_FLAG_DEFAULT); 264 265 #ifdef ENET_EVENT_COUNTER 266 enet_attach_evcnt(sc); 267 #endif 268 269 sc->sc_stopping = false; 270 271 return 0; 272 } 273 274 #ifdef ENET_EVENT_COUNTER 275 static void 276 enet_attach_evcnt(struct enet_softc *sc) 277 { 278 const char *xname; 279 280 xname = device_xname(sc->sc_dev); 281 282 #define ENET_EVCNT_ATTACH(name) \ 283 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 284 NULL, xname, #name); 285 286 ENET_EVCNT_ATTACH(t_drop); 287 ENET_EVCNT_ATTACH(t_packets); 288 ENET_EVCNT_ATTACH(t_bc_pkt); 289 ENET_EVCNT_ATTACH(t_mc_pkt); 290 ENET_EVCNT_ATTACH(t_crc_align); 291 ENET_EVCNT_ATTACH(t_undersize); 292 ENET_EVCNT_ATTACH(t_oversize); 293 ENET_EVCNT_ATTACH(t_frag); 294 ENET_EVCNT_ATTACH(t_jab); 295 ENET_EVCNT_ATTACH(t_col); 296 ENET_EVCNT_ATTACH(t_p64); 297 ENET_EVCNT_ATTACH(t_p65to127n); 298 ENET_EVCNT_ATTACH(t_p128to255n); 299 ENET_EVCNT_ATTACH(t_p256to511); 300 ENET_EVCNT_ATTACH(t_p512to1023); 301 ENET_EVCNT_ATTACH(t_p1024to2047); 302 
ENET_EVCNT_ATTACH(t_p_gte2048); 303 ENET_EVCNT_ATTACH(t_octets); 304 ENET_EVCNT_ATTACH(r_packets); 305 ENET_EVCNT_ATTACH(r_bc_pkt); 306 ENET_EVCNT_ATTACH(r_mc_pkt); 307 ENET_EVCNT_ATTACH(r_crc_align); 308 ENET_EVCNT_ATTACH(r_undersize); 309 ENET_EVCNT_ATTACH(r_oversize); 310 ENET_EVCNT_ATTACH(r_frag); 311 ENET_EVCNT_ATTACH(r_jab); 312 ENET_EVCNT_ATTACH(r_p64); 313 ENET_EVCNT_ATTACH(r_p65to127); 314 ENET_EVCNT_ATTACH(r_p128to255); 315 ENET_EVCNT_ATTACH(r_p256to511); 316 ENET_EVCNT_ATTACH(r_p512to1023); 317 ENET_EVCNT_ATTACH(r_p1024to2047); 318 ENET_EVCNT_ATTACH(r_p_gte2048); 319 ENET_EVCNT_ATTACH(r_octets); 320 } 321 322 static void 323 enet_update_evcnt(struct enet_softc *sc) 324 { 325 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 326 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 327 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 328 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 329 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 330 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 331 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 332 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 333 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 334 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 335 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 336 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 337 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 338 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 339 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 340 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 341 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 342 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 343 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 344 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 345 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 346 sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 347 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 348 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 349 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 350 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 351 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 352 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 353 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 354 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 355 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 356 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 357 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 358 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 359 } 360 #endif /* ENET_EVENT_COUNTER */ 361 362 static void 363 enet_tick(void *arg) 364 { 365 struct enet_softc *sc; 366 struct mii_data *mii; 367 struct ifnet *ifp; 368 int s; 369 370 sc = arg; 371 mii = &sc->sc_mii; 372 ifp = &sc->sc_ethercom.ec_if; 373 374 s = 
splnet(); 375 376 if (sc->sc_stopping) 377 goto out; 378 379 #ifdef ENET_EVENT_COUNTER 380 enet_update_evcnt(sc); 381 #endif 382 383 /* update counters */ 384 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 385 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 386 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 387 388 /* clear counters */ 389 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 390 ENET_REG_WRITE(sc, ENET_MIBC, 0); 391 392 mii_tick(mii); 393 out: 394 395 if (!sc->sc_stopping) 396 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 397 398 splx(s); 399 } 400 401 int 402 enet_intr(void *arg) 403 { 404 struct enet_softc *sc; 405 struct ifnet *ifp; 406 uint32_t status; 407 408 sc = arg; 409 status = ENET_REG_READ(sc, ENET_EIR); 410 411 if (sc->sc_imxtype == 7) { 412 if (status & (ENET_EIR_TXF | ENET_EIR_TXF1 | ENET_EIR_TXF2)) 413 enet_tx_intr(arg); 414 if (status & (ENET_EIR_RXF | ENET_EIR_RXF1 | ENET_EIR_RXF2)) 415 enet_rx_intr(arg); 416 } else { 417 if (status & ENET_EIR_TXF) 418 enet_tx_intr(arg); 419 if (status & ENET_EIR_RXF) 420 enet_rx_intr(arg); 421 } 422 423 if (status & ENET_EIR_EBERR) { 424 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 425 ifp = &sc->sc_ethercom.ec_if; 426 enet_stop(ifp, 1); 427 enet_init(ifp); 428 } else { 429 ENET_REG_WRITE(sc, ENET_EIR, status); 430 } 431 432 rnd_add_uint32(&sc->sc_rnd_source, status); 433 434 return 1; 435 } 436 437 static int 438 enet_tx_intr(void *arg) 439 { 440 struct enet_softc *sc; 441 struct ifnet *ifp; 442 struct enet_txsoft *txs; 443 int idx; 444 445 sc = (struct enet_softc *)arg; 446 ifp = &sc->sc_ethercom.ec_if; 447 448 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 449 idx = ENET_TX_NEXTIDX(idx)) { 450 451 txs = &sc->sc_txsoft[idx]; 452 453 TXDESC_READIN(idx); 454 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 455 /* This TX Descriptor has not been transmitted yet */ 456 break; 457 } 458 459 /* txsoft is available on first segment (TXFLAGS1_T1) */ 460 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 461 bus_dmamap_unload(sc->sc_dmat, 462 txs->txs_dmamap); 463 m_freem(txs->txs_mbuf); 464 ifp->if_opackets++; 465 } 466 467 /* checking error */ 468 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 469 uint32_t flags2; 470 471 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 472 473 if (flags2 & (TXFLAGS2_TXE | 474 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 475 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 476 #ifdef DEBUG_ENET 477 if (enet_debug) { 478 char flagsbuf[128]; 479 480 snprintb(flagsbuf, sizeof(flagsbuf), 481 "\20" "\20TRANSMIT" "\16UNDERFLOW" 482 "\15COLLISION" "\14FRAME" 483 "\13LATECOLLISION" "\12OVERFLOW", 484 flags2); 485 486 device_printf(sc->sc_dev, 487 "txdesc[%d]: transmit error: " 488 "flags2=%s\n", idx, flagsbuf); 489 } 490 #endif /* DEBUG_ENET */ 491 ifp->if_oerrors++; 492 } 493 } 494 495 sc->sc_tx_free++; 496 } 497 sc->sc_tx_considx = idx; 498 499 if (sc->sc_tx_free > 0) 500 ifp->if_flags &= ~IFF_OACTIVE; 501 502 /* 503 * No more pending TX descriptor, 504 * cancel the watchdog timer. 
505 */ 506 if (sc->sc_tx_free == ENET_TX_RING_CNT) 507 ifp->if_timer = 0; 508 509 return 1; 510 } 511 512 static int 513 enet_rx_intr(void *arg) 514 { 515 struct enet_softc *sc; 516 struct ifnet *ifp; 517 struct enet_rxsoft *rxs; 518 int idx, len, amount; 519 uint32_t flags1, flags2; 520 struct mbuf *m, *m0, *mprev; 521 522 sc = arg; 523 ifp = &sc->sc_ethercom.ec_if; 524 525 m0 = mprev = NULL; 526 amount = 0; 527 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 528 529 rxs = &sc->sc_rxsoft[idx]; 530 531 RXDESC_READIN(idx); 532 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 533 /* This RX Descriptor has not been received yet */ 534 break; 535 } 536 537 /* 538 * build mbuf from RX Descriptor if needed 539 */ 540 m = rxs->rxs_mbuf; 541 rxs->rxs_mbuf = NULL; 542 543 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 544 len = RXFLAGS1_LEN(flags1); 545 546 #define RACC_SHIFT16 2 547 if (m0 == NULL) { 548 m0 = m; 549 m_adj(m0, RACC_SHIFT16); 550 len -= RACC_SHIFT16; 551 m->m_len = len; 552 amount = len; 553 } else { 554 if (flags1 & RXFLAGS1_L) 555 len = len - amount - RACC_SHIFT16; 556 557 m->m_len = len; 558 amount += len; 559 if (m->m_flags & M_PKTHDR) 560 m_remove_pkthdr(m); 561 mprev->m_next = m; 562 } 563 mprev = m; 564 565 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 566 567 if (flags1 & RXFLAGS1_L) { 568 /* last buffer */ 569 if ((amount < ETHER_HDR_LEN) || 570 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 571 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 572 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 573 RXFLAGS2_CE)))) { 574 575 #ifdef DEBUG_ENET 576 if (enet_debug) { 577 char flags1buf[128], flags2buf[128]; 578 snprintb(flags1buf, sizeof(flags1buf), 579 "\20" "\31MISS" "\26LENGTHVIOLATION" 580 "\25NONOCTET" "\23CRC" "\22OVERRUN" 581 "\21TRUNCATED", flags1); 582 snprintb(flags2buf, sizeof(flags2buf), 583 "\20" "\40MAC" "\33PHY" 584 "\32COLLISION", flags2); 585 586 DEVICE_DPRINTF( 587 "rxdesc[%d]: receive error: " 588 "flags1=%s,flags2=%s,len=%d\n", 589 idx, flags1buf, flags2buf, amount); 590 } 591 #endif /* DEBUG_ENET */ 592 ifp->if_ierrors++; 593 m_freem(m0); 594 595 } else { 596 /* packet receive ok */ 597 m_set_rcvif(m0, ifp); 598 m0->m_pkthdr.len = amount; 599 600 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 601 rxs->rxs_dmamap->dm_mapsize, 602 BUS_DMASYNC_PREREAD); 603 604 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 605 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 606 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 607 enet_rx_csum(sc, ifp, m0, idx); 608 609 if_percpuq_enqueue(ifp->if_percpuq, m0); 610 } 611 612 m0 = NULL; 613 mprev = NULL; 614 amount = 0; 615 616 } else { 617 /* continued from previous buffer */ 618 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 619 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 620 } 621 622 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 623 if (enet_alloc_rxbuf(sc, idx) != 0) { 624 panic("enet_alloc_rxbuf NULL\n"); 625 } 626 } 627 sc->sc_rx_readidx = idx; 628 629 /* re-enable RX DMA to make sure */ 630 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 631 632 return 1; 633 } 634 635 static void 636 enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx) 637 { 638 uint32_t flags2; 639 uint8_t proto; 640 641 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 642 643 if (flags2 & RXFLAGS2_IPV6) { 644 proto = sc->sc_rxdesc_ring[idx].rx_proto; 645 646 /* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */ 647 if ((proto == IPPROTO_TCP) && 648 (ifp->if_csum_flags_rx & M_CSUM_TCPv6)) 649 m->m_pkthdr.csum_flags |= M_CSUM_TCPv6; 650 else if ((proto == 
IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is this an IPv4 packet? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0] = paddr >> 24;
	hwaddr[1] = paddr >> 16;
	hwaddr[2] = paddr >> 8;
	hwaddr[3] = paddr;

	paddr = ENET_REG_READ(sc, ENET_PAUR);
	hwaddr[4] = paddr >> 24;
	hwaddr[5] = paddr >> 16;
}

static void
enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) |
	    hwaddr[3];
	ENET_REG_WRITE(sc, ENET_PALR, paddr);
	paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16);
	ENET_REG_WRITE(sc, ENET_PAUR, paddr);
}

/*
 * ifnet interfaces
 */
static int
enet_init(struct ifnet *ifp)
{
	struct enet_softc *sc;
	int s, error;

	sc = ifp->if_softc;

	s = splnet();

	enet_init_regs(sc, 0);
	enet_init_txring(sc);
	error =
enet_init_rxring(sc); 798 if (error != 0) { 799 enet_drain_rxbuf(sc); 800 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 801 goto init_failure; 802 } 803 804 /* reload mac address */ 805 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 806 enet_sethwaddr(sc, sc->sc_enaddr); 807 808 /* program multicast address */ 809 enet_setmulti(sc); 810 811 /* update if_flags */ 812 ifp->if_flags |= IFF_RUNNING; 813 ifp->if_flags &= ~IFF_OACTIVE; 814 815 /* update local copy of if_flags */ 816 sc->sc_if_flags = ifp->if_flags; 817 818 /* mii */ 819 mii_mediachg(&sc->sc_mii); 820 821 /* enable RX DMA */ 822 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 823 824 sc->sc_stopping = false; 825 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 826 827 init_failure: 828 splx(s); 829 830 return error; 831 } 832 833 static void 834 enet_start(struct ifnet *ifp) 835 { 836 struct enet_softc *sc; 837 struct mbuf *m; 838 int npkt; 839 840 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 841 return; 842 843 sc = ifp->if_softc; 844 for (npkt = 0; ; npkt++) { 845 IFQ_POLL(&ifp->if_snd, m); 846 if (m == NULL) 847 break; 848 849 if (sc->sc_tx_free <= 0) { 850 /* no tx descriptor now... */ 851 ifp->if_flags |= IFF_OACTIVE; 852 DEVICE_DPRINTF("TX descriptor is full\n"); 853 break; 854 } 855 856 IFQ_DEQUEUE(&ifp->if_snd, m); 857 858 if (enet_encap_txring(sc, &m) != 0) { 859 /* too many mbuf chains? */ 860 ifp->if_flags |= IFF_OACTIVE; 861 DEVICE_DPRINTF( 862 "TX descriptor is full. dropping packet\n"); 863 m_freem(m); 864 ifp->if_oerrors++; 865 break; 866 } 867 868 /* Pass the packet to any BPF listeners */ 869 bpf_mtap(ifp, m, BPF_D_OUT); 870 } 871 872 if (npkt) { 873 /* enable TX DMA */ 874 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 875 876 ifp->if_timer = 5; 877 } 878 } 879 880 static void 881 enet_stop(struct ifnet *ifp, int disable) 882 { 883 struct enet_softc *sc; 884 int s; 885 uint32_t v; 886 887 sc = ifp->if_softc; 888 889 s = splnet(); 890 891 sc->sc_stopping = true; 892 callout_stop(&sc->sc_tick_ch); 893 894 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 895 v = ENET_REG_READ(sc, ENET_ECR); 896 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 897 898 /* Mark the interface as down and cancel the watchdog timer. 
*/ 899 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 900 ifp->if_timer = 0; 901 902 if (disable) { 903 enet_drain_txbuf(sc); 904 enet_drain_rxbuf(sc); 905 } 906 907 splx(s); 908 } 909 910 static void 911 enet_watchdog(struct ifnet *ifp) 912 { 913 struct enet_softc *sc; 914 int s; 915 916 sc = ifp->if_softc; 917 s = splnet(); 918 919 device_printf(sc->sc_dev, "watchdog timeout\n"); 920 ifp->if_oerrors++; 921 922 /* salvage packets left in descriptors */ 923 enet_tx_intr(sc); 924 enet_rx_intr(sc); 925 926 /* reset */ 927 enet_stop(ifp, 1); 928 enet_init(ifp); 929 930 splx(s); 931 } 932 933 static void 934 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 935 { 936 struct enet_softc *sc = ifp->if_softc; 937 938 ether_mediastatus(ifp, ifmr); 939 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 940 | sc->sc_flowflags; 941 } 942 943 static int 944 enet_ifflags_cb(struct ethercom *ec) 945 { 946 struct ifnet *ifp = &ec->ec_if; 947 struct enet_softc *sc = ifp->if_softc; 948 u_short change = ifp->if_flags ^ sc->sc_if_flags; 949 950 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 951 return ENETRESET; 952 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 953 return 0; 954 955 enet_setmulti(sc); 956 957 sc->sc_if_flags = ifp->if_flags; 958 return 0; 959 } 960 961 static int 962 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 963 { 964 struct enet_softc *sc; 965 struct ifreq *ifr; 966 int s, error; 967 uint32_t v; 968 969 sc = ifp->if_softc; 970 ifr = data; 971 972 error = 0; 973 974 s = splnet(); 975 976 switch (command) { 977 case SIOCSIFMTU: 978 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 979 error = EINVAL; 980 } else { 981 ifp->if_mtu = ifr->ifr_mtu; 982 983 /* set maximum frame length */ 984 v = MTU2FRAMESIZE(ifr->ifr_mtu); 985 ENET_REG_WRITE(sc, ENET_FTRL, v); 986 v = ENET_REG_READ(sc, ENET_RCR); 987 v &= ~ENET_RCR_MAX_FL(0x3fff); 988 v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 989 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 990 ENET_REG_WRITE(sc, ENET_RCR, v); 991 } 992 break; 993 case SIOCSIFMEDIA: 994 /* Flow control requires full-duplex mode. */ 995 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 996 (ifr->ifr_media & IFM_FDX) == 0) 997 ifr->ifr_media &= ~IFM_ETH_FMASK; 998 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 999 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1000 /* We can do both TXPAUSE and RXPAUSE. 
 */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error != ENETRESET)
			break;

		/* post-process */
		error = 0;
		switch (command) {
		case SIOCSIFCAP:
			error = (*ifp->if_init)(ifp);
			break;
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (ifp->if_flags & IFF_RUNNING)
				enet_setmulti(sc);
			break;
		}
		break;
	}

	splx(s);

	return error;
}

/*
 * for MII
 */
static int
enet_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct enet_softc *sc;
	int timeout;
	uint32_t status;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* read command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		status = ENET_REG_READ(sc, ENET_EIR);
		if (status & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n",
		    reg);
		return ETIMEDOUT;
	} else
		*val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK;

	return 0;
}

static int
enet_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct enet_softc *sc;
	int timeout;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* write command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) |
	    (ENET_MMFR_DATAMASK & val));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
enet_miibus_statchg(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	uint32_t ecr, ecr0;
	uint32_t rcr, rcr0;
	uint32_t tcr, tcr0;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/* get current status */
	ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET;
	rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR);
	tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR);

	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if ((ife->ifm_media & IFM_FDX) != 0) {
		tcr |= ENET_TCR_FDEN;	/* full duplex */
		rcr &= ~ENET_RCR_DRT;	/* enable receive on transmit */
	} else {
		tcr &= ~ENET_TCR_FDEN;	/* half duplex */
		rcr |= ENET_RCR_DRT;	/* disable receive on transmit */
	}

	if ((tcr ^ tcr0) & ENET_TCR_FDEN) {
		/*
		 * need to reset because
		 * FDEN can change when ECR[ETHEREN] is 0
		 */
		enet_init_regs(sc, 0);
		return;
1139 } 1140 1141 switch (IFM_SUBTYPE(ife->ifm_media)) { 1142 case IFM_AUTO: 1143 case IFM_1000_T: 1144 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1145 rcr &= ~ENET_RCR_RMII_10T; 1146 break; 1147 case IFM_100_TX: 1148 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1149 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1150 break; 1151 case IFM_10_T: 1152 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1153 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1154 break; 1155 default: 1156 ecr = ecr0; 1157 rcr = rcr0; 1158 tcr = tcr0; 1159 break; 1160 } 1161 1162 if (sc->sc_rgmii == 0) 1163 ecr &= ~ENET_ECR_SPEED; 1164 1165 if (sc->sc_flowflags & IFM_FLOW) 1166 rcr |= ENET_RCR_FCE; 1167 else 1168 rcr &= ~ENET_RCR_FCE; 1169 1170 /* update registers if need change */ 1171 if (ecr != ecr0) 1172 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1173 if (rcr != rcr0) 1174 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1175 if (tcr != tcr0) 1176 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1177 } 1178 1179 /* 1180 * handling descriptors 1181 */ 1182 static void 1183 enet_init_txring(struct enet_softc *sc) 1184 { 1185 int i; 1186 1187 /* build TX ring */ 1188 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1189 sc->sc_txdesc_ring[i].tx_flags1_len = 1190 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1191 sc->sc_txdesc_ring[i].tx_databuf = 0; 1192 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1193 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1194 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1195 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1196 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1197 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1198 1199 TXDESC_WRITEOUT(i); 1200 } 1201 1202 sc->sc_tx_free = ENET_TX_RING_CNT; 1203 sc->sc_tx_considx = 0; 1204 sc->sc_tx_prodidx = 0; 1205 } 1206 1207 static int 1208 enet_init_rxring(struct enet_softc *sc) 1209 { 1210 int i, error; 1211 1212 /* build RX ring */ 1213 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1214 error = enet_alloc_rxbuf(sc, i); 1215 if (error != 0) 1216 return error; 1217 } 1218 1219 sc->sc_rx_readidx = 0; 1220 1221 return 0; 1222 } 1223 1224 static int 1225 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1226 { 1227 struct mbuf *m; 1228 int error; 1229 1230 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1231 1232 /* free mbuf if already allocated */ 1233 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1234 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1235 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1236 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1237 } 1238 1239 /* allocate new mbuf cluster */ 1240 MGETHDR(m, M_DONTWAIT, MT_DATA); 1241 if (m == NULL) 1242 return ENOBUFS; 1243 MCLGET(m, M_DONTWAIT); 1244 if (!(m->m_flags & M_EXT)) { 1245 m_freem(m); 1246 return ENOBUFS; 1247 } 1248 m->m_len = MCLBYTES; 1249 m->m_next = NULL; 1250 1251 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1252 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1253 BUS_DMA_READ | BUS_DMA_NOWAIT); 1254 if (error) { 1255 m_freem(m); 1256 return error; 1257 } 1258 1259 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1260 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1261 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1262 1263 sc->sc_rxsoft[idx].rxs_mbuf = m; 1264 enet_reset_rxdesc(sc, idx); 1265 return 0; 1266 } 1267 1268 static void 1269 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1270 { 1271 uint32_t paddr; 1272 1273 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1274 1275 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1276 RXFLAGS1_E | 1277 ((idx == (ENET_RX_RING_CNT - 1)) ? 
	    RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only for the first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			ifp->if_oerrors++;
		}
		sc->sc_tx_free++;
	}
}

static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * A TX descriptor chain must be able to hold a whole mbuf chain,
	 * so create dmamaps with up to ENET_MAX_PKT_NSEGS segments.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * Each RX descriptor holds one mbuf cluster,
	 * so make up one single-segment dmamap per descriptor.
1361 */ 1362 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1363 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1364 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1365 &sc->sc_rxsoft[i].rxs_dmamap); 1366 if (error) { 1367 aprint_error_dev(sc->sc_dev, 1368 "can't create DMA map for RX descs\n"); 1369 goto fail_2; 1370 } 1371 } 1372 1373 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1374 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1375 return -1; 1376 memset(sc->sc_txdesc_ring, 0, 1377 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1378 1379 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1380 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1381 return -1; 1382 memset(sc->sc_rxdesc_ring, 0, 1383 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1384 1385 return 0; 1386 1387 fail_2: 1388 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1389 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1390 bus_dmamap_destroy(sc->sc_dmat, 1391 sc->sc_rxsoft[i].rxs_dmamap); 1392 } 1393 fail_1: 1394 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1395 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1396 bus_dmamap_destroy(sc->sc_dmat, 1397 sc->sc_txsoft[i].txs_dmamap); 1398 } 1399 return error; 1400 } 1401 1402 static int 1403 enet_encap_mbufalign(struct mbuf **mp) 1404 { 1405 struct mbuf *m, *m0, *mt, *p, *x; 1406 void *ap; 1407 uint32_t alignoff, chiplen; 1408 1409 /* 1410 * iMX6 SoC ethernet controller requires 1411 * address of buffer must aligned 8, and 1412 * length of buffer must be greater than 10 (first fragment only?) 1413 */ 1414 #define ALIGNBYTE 8 1415 #define MINBUFSIZE 10 1416 #define ALIGN_PTR(p, align) \ 1417 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1418 1419 m0 = *mp; 1420 mt = p = NULL; 1421 for (m = m0; m != NULL; m = m->m_next) { 1422 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1423 if (m->m_len < (ALIGNBYTE * 2)) { 1424 /* 1425 * rearrange mbuf data aligned 1426 * 1427 * align 8 * * * * * 1428 * +0123456789abcdef0123456789abcdef0 1429 * FROM m->m_data[___________abcdefghijklmn_______] 1430 * 1431 * +0123456789abcdef0123456789abcdef0 1432 * TO m->m_data[________abcdefghijklm___________] or 1433 * m->m_data[________________abcdefghijklmn__] 1434 */ 1435 if ((alignoff != 0) && (m->m_len != 0)) { 1436 chiplen = ALIGNBYTE - alignoff; 1437 if (M_LEADINGSPACE(m) >= alignoff) { 1438 ap = m->m_data - alignoff; 1439 memmove(ap, m->m_data, m->m_len); 1440 m->m_data = ap; 1441 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1442 ap = m->m_data + chiplen; 1443 memmove(ap, m->m_data, m->m_len); 1444 m->m_data = ap; 1445 } else { 1446 /* 1447 * no space to align data. (M_READONLY?) 1448 * allocate new mbuf aligned, 1449 * and copy to it. 
1450 */ 1451 MGET(x, M_DONTWAIT, m->m_type); 1452 if (x == NULL) { 1453 m_freem(m); 1454 return ENOBUFS; 1455 } 1456 MCLAIM(x, m->m_owner); 1457 if (m->m_flags & M_PKTHDR) 1458 m_move_pkthdr(x, m); 1459 x->m_len = m->m_len; 1460 x->m_data = ALIGN_PTR(x->m_data, 1461 ALIGNBYTE); 1462 memcpy(mtod(x, void *), mtod(m, void *), 1463 m->m_len); 1464 p->m_next = x; 1465 x->m_next = m_free(m); 1466 m = x; 1467 } 1468 } 1469 1470 /* 1471 * fill 1st mbuf at least 10byte 1472 * 1473 * align 8 * * * * * 1474 * +0123456789abcdef0123456789abcdef0 1475 * FROM m->m_data[________abcde___________________] 1476 * m->m_data[__fg____________________________] 1477 * m->m_data[_________________hi_____________] 1478 * m->m_data[__________jk____________________] 1479 * m->m_data[____l___________________________] 1480 * 1481 * +0123456789abcdef0123456789abcdef0 1482 * TO m->m_data[________abcdefghij______________] 1483 * m->m_data[________________________________] 1484 * m->m_data[________________________________] 1485 * m->m_data[___________k____________________] 1486 * m->m_data[____l___________________________] 1487 */ 1488 if (mt == NULL) { 1489 mt = m; 1490 while (mt->m_len == 0) { 1491 mt = mt->m_next; 1492 if (mt == NULL) { 1493 m_freem(m); 1494 return ENOBUFS; 1495 } 1496 } 1497 1498 /* mt = 1st mbuf, x = 2nd mbuf */ 1499 x = mt->m_next; 1500 while (mt->m_len < MINBUFSIZE) { 1501 if (x == NULL) { 1502 m_freem(m); 1503 return ENOBUFS; 1504 } 1505 1506 alignoff = (uintptr_t)x->m_data & 1507 (ALIGNBYTE - 1); 1508 chiplen = ALIGNBYTE - alignoff; 1509 if (chiplen > x->m_len) { 1510 chiplen = x->m_len; 1511 } else if ((mt->m_len + chiplen) < 1512 MINBUFSIZE) { 1513 /* 1514 * next mbuf should be greater 1515 * than ALIGNBYTE? 1516 */ 1517 if (x->m_len >= (chiplen + 1518 ALIGNBYTE * 2)) 1519 chiplen += ALIGNBYTE; 1520 else 1521 chiplen = x->m_len; 1522 } 1523 1524 if (chiplen && 1525 (M_TRAILINGSPACE(mt) < chiplen)) { 1526 /* 1527 * move data to the begining of 1528 * m_dat[] (aligned) to en- 1529 * large trailingspace 1530 */ 1531 ap = M_BUFADDR(mt); 1532 ap = ALIGN_PTR(ap, ALIGNBYTE); 1533 memcpy(ap, mt->m_data, 1534 mt->m_len); 1535 mt->m_data = ap; 1536 } 1537 1538 if (chiplen && 1539 (M_TRAILINGSPACE(mt) >= chiplen)) { 1540 memcpy(mt->m_data + mt->m_len, 1541 x->m_data, chiplen); 1542 mt->m_len += chiplen; 1543 m_adj(x, chiplen); 1544 } 1545 1546 x = x->m_next; 1547 } 1548 } 1549 1550 } else { 1551 mt = m; 1552 1553 /* 1554 * allocate new mbuf x, and rearrange as below; 1555 * 1556 * align 8 * * * * * 1557 * +0123456789abcdef0123456789abcdef0 1558 * FROM m->m_data[____________abcdefghijklmnopq___] 1559 * 1560 * +0123456789abcdef0123456789abcdef0 1561 * TO x->m_data[________abcdefghijkl____________] 1562 * m->m_data[________________________mnopq___] 1563 * 1564 */ 1565 if (alignoff != 0) { 1566 /* at least ALIGNBYTE */ 1567 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1568 1569 MGET(x, M_DONTWAIT, m->m_type); 1570 if (x == NULL) { 1571 m_freem(m); 1572 return ENOBUFS; 1573 } 1574 MCLAIM(x, m->m_owner); 1575 if (m->m_flags & M_PKTHDR) 1576 m_move_pkthdr(x, m); 1577 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1578 memcpy(mtod(x, void *), mtod(m, void *), 1579 chiplen); 1580 x->m_len = chiplen; 1581 x->m_next = m; 1582 m_adj(m, chiplen); 1583 1584 if (p == NULL) 1585 m0 = x; 1586 else 1587 p->m_next = x; 1588 } 1589 } 1590 p = m; 1591 } 1592 *mp = m0; 1593 1594 return 0; 1595 } 1596 1597 static int 1598 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1599 { 1600 bus_dmamap_t map; 1601 struct mbuf *m; 
1602 int csumflags, idx, i, error; 1603 uint32_t flags1, flags2; 1604 1605 idx = sc->sc_tx_prodidx; 1606 map = sc->sc_txsoft[idx].txs_dmamap; 1607 1608 /* align mbuf data for claim of ENET */ 1609 error = enet_encap_mbufalign(mp); 1610 if (error != 0) 1611 return error; 1612 1613 m = *mp; 1614 csumflags = m->m_pkthdr.csum_flags; 1615 1616 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1617 BUS_DMA_NOWAIT); 1618 if (error != 0) { 1619 device_printf(sc->sc_dev, 1620 "Error mapping mbuf into TX chain: error=%d\n", error); 1621 m_freem(m); 1622 return error; 1623 } 1624 1625 if (map->dm_nsegs > sc->sc_tx_free) { 1626 bus_dmamap_unload(sc->sc_dmat, map); 1627 device_printf(sc->sc_dev, 1628 "too many mbuf chain %d\n", map->dm_nsegs); 1629 m_freem(m); 1630 return ENOBUFS; 1631 } 1632 1633 /* fill protocol cksum zero beforehand */ 1634 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1635 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1636 int ehlen; 1637 uint16_t etype; 1638 1639 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1640 switch (ntohs(etype)) { 1641 case ETHERTYPE_IP: 1642 case ETHERTYPE_IPV6: 1643 ehlen = ETHER_HDR_LEN; 1644 break; 1645 case ETHERTYPE_VLAN: 1646 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1647 break; 1648 default: 1649 ehlen = 0; 1650 break; 1651 } 1652 1653 if (ehlen) { 1654 const int off = 1655 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1656 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data); 1657 if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) { 1658 uint16_t zero = 0; 1659 m_copyback(m, ehlen + off, sizeof(zero), &zero); 1660 } 1661 } 1662 } 1663 1664 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1665 BUS_DMASYNC_PREWRITE); 1666 1667 for (i = 0; i < map->dm_nsegs; i++) { 1668 flags1 = TXFLAGS1_R; 1669 flags2 = 0; 1670 1671 if (i == 0) { 1672 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1673 sc->sc_txsoft[idx].txs_mbuf = m; 1674 } 1675 1676 /* checksum offloading */ 1677 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1678 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1679 flags2 |= TXFLAGS2_PINS; 1680 if (csumflags & (M_CSUM_IPv4)) 1681 flags2 |= TXFLAGS2_IINS; 1682 1683 if (i == map->dm_nsegs - 1) { 1684 /* mark last segment */ 1685 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1686 flags2 |= TXFLAGS2_INT; 1687 } 1688 if (idx == ENET_TX_RING_CNT - 1) { 1689 /* mark end of ring */ 1690 flags1 |= TXFLAGS1_W; 1691 } 1692 1693 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1694 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1695 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1696 TXDESC_WRITEOUT(idx); 1697 1698 sc->sc_txdesc_ring[idx].tx_flags1_len = 1699 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1700 TXDESC_WRITEOUT(idx); 1701 1702 idx = ENET_TX_NEXTIDX(idx); 1703 sc->sc_tx_free--; 1704 } 1705 1706 sc->sc_tx_prodidx = idx; 1707 1708 return 0; 1709 } 1710 1711 /* 1712 * device initialize 1713 */ 1714 static int 1715 enet_init_regs(struct enet_softc *sc, int init) 1716 { 1717 struct mii_data *mii; 1718 struct ifmedia_entry *ife; 1719 paddr_t paddr; 1720 uint32_t val; 1721 int miimode, fulldup, ecr_speed, rcr_speed, flowctrl; 1722 1723 if (init) { 1724 fulldup = 1; 1725 ecr_speed = ENET_ECR_SPEED; 1726 rcr_speed = 0; 1727 flowctrl = 0; 1728 } else { 1729 mii = &sc->sc_mii; 1730 ife = mii->mii_media.ifm_cur; 1731 1732 if ((ife->ifm_media & IFM_FDX) != 0) 1733 fulldup = 1; 1734 else 1735 fulldup = 0; 1736 1737 switch (IFM_SUBTYPE(ife->ifm_media)) { 1738 case IFM_10_T: 1739 ecr_speed = 0; 1740 rcr_speed = ENET_RCR_RMII_10T; 1741 break; 1742 case IFM_100_TX: 1743 
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	if (sc->sc_rgmii == 0)
		ecr_speed = 0;

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupts */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable MIB register */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/*
	 * MII speed setup.  MDC must not exceed 2.5MHz:
	 *   MDCclk = (internal module clock) / ((val + 1) * 2)
	 * e.g. a 66MHz module clock gives val = 13 (MDC is about 2.36MHz).
	 */
	val = (sc->sc_clock + (5000000 - 1)) / 5000000 - 1;
	ENET_REG_WRITE(sc, ENET_MSCR, __SHIFTIN(val, ENET_MSCR_MII_SPEED));

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksum is off by default (overridden per TX descriptor) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align ethernet payload on 32bit, discard frames with MAC layer error,
	 * and don't discard checksum error
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */

	if (sc->sc_rgmii == 0)
		miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE;
	else
		miimode = ENET_RCR_RGMII_EN;
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* remove RX frame padding */
	    miimode |
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
	    rcr_speed |
	    (fulldup ?
0 : ENET_RCR_DRT) |
	    ENET_RCR_MAX_FL(val));

	/* Maximum Receive BufSize per one descriptor */
	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);


	/* TX/RX Descriptor Physical Address */
	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
	/* sync cache */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* enable interrupts */
	val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR;
	if (sc->sc_imxtype == 7)
		val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 | ENET_EIR_TXF1 |
		    ENET_EIR_RXF1;
	ENET_REG_WRITE(sc, ENET_EIMR, val);

	/* enable ether */
	ENET_REG_WRITE(sc, ENET_ECR,
#if _BYTE_ORDER == _LITTLE_ENDIAN
	    ENET_ECR_DBSWP |
#endif
	    ecr_speed |
	    ENET_ECR_EN1588 |		/* use enhanced TX/RX descriptor */
	    ENET_ECR_ETHEREN);		/* Ethernet Enable */

	return 0;
}

static int
enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp,
    bus_dmamap_t *mapp)
{
	bus_dma_segment_t seglist[1];
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist,
	    1, &nsegs, M_NOWAIT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate DMA buffer, error=%d\n", error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to map DMA buffer, error=%d\n",
		    error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create DMA map, error=%d\n", error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load DMA map, error=%d\n", error);
		goto fail_load;
	}

	return 0;

 fail_load:
	bus_dmamap_destroy(sc->sc_dmat, *mapp);
 fail_create:
	bus_dmamem_unmap(sc->sc_dmat, *addrp, size);
 fail_map:
	bus_dmamem_free(sc->sc_dmat, seglist, 1);
 fail_alloc:
	return error;
}