1 /* $NetBSD: if_enet.c,v 1.24 2019/07/23 06:36:36 hkenken Exp $ */ 2 3 /* 4 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET) 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.24 2019/07/23 06:36:36 hkenken Exp $"); 35 36 #include "vlan.h" 37 38 #include <sys/param.h> 39 #include <sys/bus.h> 40 #include <sys/mbuf.h> 41 #include <sys/device.h> 42 #include <sys/sockio.h> 43 #include <sys/kernel.h> 44 #include <sys/rndsource.h> 45 46 #include <lib/libkern/libkern.h> 47 48 #include <net/if.h> 49 #include <net/if_dl.h> 50 #include <net/if_media.h> 51 #include <net/if_ether.h> 52 #include <net/bpf.h> 53 #include <net/if_vlanvar.h> 54 55 #include <netinet/in.h> 56 #include <netinet/in_systm.h> 57 #include <netinet/ip.h> 58 59 #include <dev/mii/mii.h> 60 #include <dev/mii/miivar.h> 61 62 #include <arm/imx/if_enetreg.h> 63 #include <arm/imx/if_enetvar.h> 64 65 #undef DEBUG_ENET 66 #undef ENET_EVENT_COUNTER 67 68 #define ENET_TICK hz 69 70 #ifdef DEBUG_ENET 71 int enet_debug = 0; 72 # define DEVICE_DPRINTF(args...) \ 73 do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0) 74 #else 75 # define DEVICE_DPRINTF(args...) 76 #endif 77 78 79 #define RXDESC_MAXBUFSIZE 0x07f0 80 /* ENET does not work greather than 0x0800... */ 81 82 #undef ENET_SUPPORT_JUMBO /* JUMBO FRAME SUPPORT is unstable */ 83 #ifdef ENET_SUPPORT_JUMBO 84 # define ENET_MAX_PKT_LEN 4034 /* MAX FIFO LEN */ 85 #else 86 # define ENET_MAX_PKT_LEN 1522 87 #endif 88 #define ENET_DEFAULT_PKT_LEN 1522 /* including VLAN tag */ 89 #define MTU2FRAMESIZE(n) \ 90 ((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) 91 92 93 #define ENET_MAX_PKT_NSEGS 64 94 95 #define ENET_TX_NEXTIDX(idx) \ 96 (((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1)) 97 #define ENET_RX_NEXTIDX(idx) \ 98 (((idx) >= (ENET_RX_RING_CNT - 1)) ? 
0 : ((idx) + 1)) 99 100 #define TXDESC_WRITEOUT(idx) \ 101 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 sizeof(struct enet_txdesc) * (idx), \ 103 sizeof(struct enet_txdesc), \ 104 BUS_DMASYNC_PREWRITE) 105 106 #define TXDESC_READIN(idx) \ 107 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 sizeof(struct enet_txdesc) * (idx), \ 109 sizeof(struct enet_txdesc), \ 110 BUS_DMASYNC_PREREAD) 111 112 #define RXDESC_WRITEOUT(idx) \ 113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 sizeof(struct enet_rxdesc) * (idx), \ 115 sizeof(struct enet_rxdesc), \ 116 BUS_DMASYNC_PREWRITE) 117 118 #define RXDESC_READIN(idx) \ 119 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 sizeof(struct enet_rxdesc) * (idx), \ 121 sizeof(struct enet_rxdesc), \ 122 BUS_DMASYNC_PREREAD) 123 124 #define ENET_REG_READ(sc, reg) \ 125 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 127 #define ENET_REG_WRITE(sc, reg, value) \ 128 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 130 #ifdef ENET_EVENT_COUNTER 131 static void enet_attach_evcnt(struct enet_softc *); 132 static void enet_update_evcnt(struct enet_softc *); 133 #endif 134 135 static void enet_tick(void *); 136 static int enet_tx_intr(void *); 137 static int enet_rx_intr(void *); 138 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 139 int); 140 141 static void enet_start(struct ifnet *); 142 static int enet_ifflags_cb(struct ethercom *); 143 static int enet_ioctl(struct ifnet *, u_long, void *); 144 static int enet_init(struct ifnet *); 145 static void enet_stop(struct ifnet *, int); 146 static void enet_watchdog(struct ifnet *); 147 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 148 149 static int enet_miibus_readreg(device_t, int, int, uint16_t *); 150 static int enet_miibus_writereg(device_t, int, int, uint16_t); 151 static void enet_miibus_statchg(struct ifnet *); 152 153 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 154 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 155 static void enet_setmulti(struct enet_softc *); 156 static int enet_encap_mbufalign(struct mbuf **); 157 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 158 static int enet_init_regs(struct enet_softc *, int); 159 static int enet_alloc_ring(struct enet_softc *); 160 static void enet_init_txring(struct enet_softc *); 161 static int enet_init_rxring(struct enet_softc *); 162 static void enet_reset_rxdesc(struct enet_softc *, int); 163 static int enet_alloc_rxbuf(struct enet_softc *, int); 164 static void enet_drain_txbuf(struct enet_softc *); 165 static void enet_drain_rxbuf(struct enet_softc *); 166 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 167 bus_dmamap_t *); 168 169 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 170 enet_match, enet_attach, NULL, NULL); 171 172 int 173 enet_attach_common(device_t self) 174 { 175 struct enet_softc *sc = device_private(self); 176 struct ifnet *ifp; 177 struct mii_data * const mii = &sc->sc_mii; 178 179 /* allocate dma buffer */ 180 if (enet_alloc_ring(sc)) 181 return -1; 182 183 #define IS_ENADDR_ZERO(enaddr) \ 184 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 185 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 186 187 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 188 /* by any chance, mac-address is already set by bootloader? */ 189 enet_gethwaddr(sc, sc->sc_enaddr); 190 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 191 /* give up. 
set randomly */ 192 uint32_t eaddr = random(); 193 /* not multicast */ 194 sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc; 195 sc->sc_enaddr[1] = eaddr >> 16; 196 sc->sc_enaddr[2] = eaddr >> 8; 197 sc->sc_enaddr[3] = eaddr; 198 eaddr = random(); 199 sc->sc_enaddr[4] = eaddr >> 8; 200 sc->sc_enaddr[5] = eaddr; 201 202 aprint_error_dev(self, 203 "cannot get mac address. set randomly\n"); 204 } 205 } 206 enet_sethwaddr(sc, sc->sc_enaddr); 207 208 aprint_normal_dev(self, "Ethernet address %s\n", 209 ether_sprintf(sc->sc_enaddr)); 210 211 enet_init_regs(sc, 1); 212 213 /* callout will be scheduled from enet_init() */ 214 callout_init(&sc->sc_tick_ch, 0); 215 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 216 217 /* setup ifp */ 218 ifp = &sc->sc_ethercom.ec_if; 219 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 220 ifp->if_softc = sc; 221 ifp->if_mtu = ETHERMTU; 222 ifp->if_baudrate = IF_Gbps(1); 223 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 224 ifp->if_ioctl = enet_ioctl; 225 ifp->if_start = enet_start; 226 ifp->if_init = enet_init; 227 ifp->if_stop = enet_stop; 228 ifp->if_watchdog = enet_watchdog; 229 230 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 231 #ifdef ENET_SUPPORT_JUMBO 232 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 233 #endif 234 235 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 236 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 237 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 238 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 239 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 240 241 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN)); 242 IFQ_SET_READY(&ifp->if_snd); 243 244 /* setup MII */ 245 sc->sc_ethercom.ec_mii = mii; 246 mii->mii_ifp = ifp; 247 mii->mii_readreg = enet_miibus_readreg; 248 mii->mii_writereg = enet_miibus_writereg; 249 mii->mii_statchg = enet_miibus_statchg; 250 ifmedia_init(&mii->mii_media, 0, ether_mediachange, enet_mediastatus); 251 252 /* try to attach PHY */ 253 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); 254 if (LIST_FIRST(&mii->mii_phys) == NULL) { 255 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 256 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 257 } else { 258 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 259 } 260 261 if_attach(ifp); 262 ether_ifattach(ifp, sc->sc_enaddr); 263 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 264 265 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 266 RND_TYPE_NET, RND_FLAG_DEFAULT); 267 268 #ifdef ENET_EVENT_COUNTER 269 enet_attach_evcnt(sc); 270 #endif 271 272 sc->sc_stopping = false; 273 274 return 0; 275 } 276 277 #ifdef ENET_EVENT_COUNTER 278 static void 279 enet_attach_evcnt(struct enet_softc *sc) 280 { 281 const char *xname; 282 283 xname = device_xname(sc->sc_dev); 284 285 #define ENET_EVCNT_ATTACH(name) \ 286 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 287 NULL, xname, #name); 288 289 ENET_EVCNT_ATTACH(t_drop); 290 ENET_EVCNT_ATTACH(t_packets); 291 ENET_EVCNT_ATTACH(t_bc_pkt); 292 ENET_EVCNT_ATTACH(t_mc_pkt); 293 ENET_EVCNT_ATTACH(t_crc_align); 294 ENET_EVCNT_ATTACH(t_undersize); 295 ENET_EVCNT_ATTACH(t_oversize); 296 ENET_EVCNT_ATTACH(t_frag); 297 ENET_EVCNT_ATTACH(t_jab); 298 ENET_EVCNT_ATTACH(t_col); 299 ENET_EVCNT_ATTACH(t_p64); 300 ENET_EVCNT_ATTACH(t_p65to127n); 301 ENET_EVCNT_ATTACH(t_p128to255n); 302 ENET_EVCNT_ATTACH(t_p256to511); 303 ENET_EVCNT_ATTACH(t_p512to1023); 304 ENET_EVCNT_ATTACH(t_p1024to2047); 305 
ENET_EVCNT_ATTACH(t_p_gte2048); 306 ENET_EVCNT_ATTACH(t_octets); 307 ENET_EVCNT_ATTACH(r_packets); 308 ENET_EVCNT_ATTACH(r_bc_pkt); 309 ENET_EVCNT_ATTACH(r_mc_pkt); 310 ENET_EVCNT_ATTACH(r_crc_align); 311 ENET_EVCNT_ATTACH(r_undersize); 312 ENET_EVCNT_ATTACH(r_oversize); 313 ENET_EVCNT_ATTACH(r_frag); 314 ENET_EVCNT_ATTACH(r_jab); 315 ENET_EVCNT_ATTACH(r_p64); 316 ENET_EVCNT_ATTACH(r_p65to127); 317 ENET_EVCNT_ATTACH(r_p128to255); 318 ENET_EVCNT_ATTACH(r_p256to511); 319 ENET_EVCNT_ATTACH(r_p512to1023); 320 ENET_EVCNT_ATTACH(r_p1024to2047); 321 ENET_EVCNT_ATTACH(r_p_gte2048); 322 ENET_EVCNT_ATTACH(r_octets); 323 } 324 325 static void 326 enet_update_evcnt(struct enet_softc *sc) 327 { 328 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 329 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 330 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 331 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 332 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 333 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 334 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 335 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 336 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 337 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 338 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 339 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 340 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 341 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 342 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 343 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 344 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 345 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 346 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 347 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 348 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 349 sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 350 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 351 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 352 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 353 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 354 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 355 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 356 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 357 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 358 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 359 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 360 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 361 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 362 } 363 #endif /* ENET_EVENT_COUNTER */ 364 365 static void 366 enet_tick(void *arg) 367 { 368 struct enet_softc *sc; 369 struct mii_data *mii; 370 struct ifnet *ifp; 371 int s; 372 373 sc = arg; 374 mii = &sc->sc_mii; 375 ifp = &sc->sc_ethercom.ec_if; 376 377 s = 
splnet(); 378 379 if (sc->sc_stopping) 380 goto out; 381 382 #ifdef ENET_EVENT_COUNTER 383 enet_update_evcnt(sc); 384 #endif 385 386 /* update counters */ 387 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 388 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 389 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 390 391 /* clear counters */ 392 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 393 ENET_REG_WRITE(sc, ENET_MIBC, 0); 394 395 mii_tick(mii); 396 out: 397 398 if (!sc->sc_stopping) 399 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 400 401 splx(s); 402 } 403 404 int 405 enet_intr(void *arg) 406 { 407 struct enet_softc *sc; 408 struct ifnet *ifp; 409 uint32_t status; 410 411 sc = arg; 412 status = ENET_REG_READ(sc, ENET_EIR); 413 414 if (sc->sc_imxtype == 7) { 415 if (status & (ENET_EIR_TXF | ENET_EIR_TXF1 | ENET_EIR_TXF2)) 416 enet_tx_intr(arg); 417 if (status & (ENET_EIR_RXF | ENET_EIR_RXF1 | ENET_EIR_RXF2)) 418 enet_rx_intr(arg); 419 } else { 420 if (status & ENET_EIR_TXF) 421 enet_tx_intr(arg); 422 if (status & ENET_EIR_RXF) 423 enet_rx_intr(arg); 424 } 425 426 if (status & ENET_EIR_EBERR) { 427 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 428 ifp = &sc->sc_ethercom.ec_if; 429 enet_stop(ifp, 1); 430 enet_init(ifp); 431 } else { 432 ENET_REG_WRITE(sc, ENET_EIR, status); 433 } 434 435 rnd_add_uint32(&sc->sc_rnd_source, status); 436 437 return 1; 438 } 439 440 static int 441 enet_tx_intr(void *arg) 442 { 443 struct enet_softc *sc; 444 struct ifnet *ifp; 445 struct enet_txsoft *txs; 446 int idx; 447 448 sc = (struct enet_softc *)arg; 449 ifp = &sc->sc_ethercom.ec_if; 450 451 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 452 idx = ENET_TX_NEXTIDX(idx)) { 453 454 txs = &sc->sc_txsoft[idx]; 455 456 TXDESC_READIN(idx); 457 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 458 /* This TX Descriptor has not been transmitted yet */ 459 break; 460 } 461 462 /* txsoft is available on first segment (TXFLAGS1_T1) */ 463 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 464 bus_dmamap_unload(sc->sc_dmat, 465 txs->txs_dmamap); 466 m_freem(txs->txs_mbuf); 467 ifp->if_opackets++; 468 } 469 470 /* checking error */ 471 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 472 uint32_t flags2; 473 474 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 475 476 if (flags2 & (TXFLAGS2_TXE | 477 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 478 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 479 #ifdef DEBUG_ENET 480 if (enet_debug) { 481 char flagsbuf[128]; 482 483 snprintb(flagsbuf, sizeof(flagsbuf), 484 "\20" "\20TRANSMIT" "\16UNDERFLOW" 485 "\15COLLISION" "\14FRAME" 486 "\13LATECOLLISION" "\12OVERFLOW", 487 flags2); 488 489 device_printf(sc->sc_dev, 490 "txdesc[%d]: transmit error: " 491 "flags2=%s\n", idx, flagsbuf); 492 } 493 #endif /* DEBUG_ENET */ 494 ifp->if_oerrors++; 495 } 496 } 497 498 sc->sc_tx_free++; 499 } 500 sc->sc_tx_considx = idx; 501 502 if (sc->sc_tx_free > 0) 503 ifp->if_flags &= ~IFF_OACTIVE; 504 505 /* 506 * No more pending TX descriptor, 507 * cancel the watchdog timer. 
508 */ 509 if (sc->sc_tx_free == ENET_TX_RING_CNT) 510 ifp->if_timer = 0; 511 512 return 1; 513 } 514 515 static int 516 enet_rx_intr(void *arg) 517 { 518 struct enet_softc *sc; 519 struct ifnet *ifp; 520 struct enet_rxsoft *rxs; 521 int idx, len, amount; 522 uint32_t flags1, flags2; 523 struct mbuf *m, *m0, *mprev; 524 525 sc = arg; 526 ifp = &sc->sc_ethercom.ec_if; 527 528 m0 = mprev = NULL; 529 amount = 0; 530 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 531 532 rxs = &sc->sc_rxsoft[idx]; 533 534 RXDESC_READIN(idx); 535 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 536 /* This RX Descriptor has not been received yet */ 537 break; 538 } 539 540 /* 541 * build mbuf from RX Descriptor if needed 542 */ 543 m = rxs->rxs_mbuf; 544 rxs->rxs_mbuf = NULL; 545 546 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 547 len = RXFLAGS1_LEN(flags1); 548 549 #define RACC_SHIFT16 2 550 if (m0 == NULL) { 551 m0 = m; 552 m_adj(m0, RACC_SHIFT16); 553 len -= RACC_SHIFT16; 554 m->m_len = len; 555 amount = len; 556 } else { 557 if (flags1 & RXFLAGS1_L) 558 len = len - amount - RACC_SHIFT16; 559 560 m->m_len = len; 561 amount += len; 562 if (m->m_flags & M_PKTHDR) 563 m_remove_pkthdr(m); 564 mprev->m_next = m; 565 } 566 mprev = m; 567 568 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 569 570 if (flags1 & RXFLAGS1_L) { 571 /* last buffer */ 572 if ((amount < ETHER_HDR_LEN) || 573 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 574 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 575 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 576 RXFLAGS2_CE)))) { 577 578 #ifdef DEBUG_ENET 579 if (enet_debug) { 580 char flags1buf[128], flags2buf[128]; 581 snprintb(flags1buf, sizeof(flags1buf), 582 "\20" "\31MISS" "\26LENGTHVIOLATION" 583 "\25NONOCTET" "\23CRC" "\22OVERRUN" 584 "\21TRUNCATED", flags1); 585 snprintb(flags2buf, sizeof(flags2buf), 586 "\20" "\40MAC" "\33PHY" 587 "\32COLLISION", flags2); 588 589 DEVICE_DPRINTF( 590 "rxdesc[%d]: receive error: " 591 "flags1=%s,flags2=%s,len=%d\n", 592 idx, flags1buf, flags2buf, amount); 593 } 594 #endif /* DEBUG_ENET */ 595 ifp->if_ierrors++; 596 m_freem(m0); 597 598 } else { 599 /* packet receive ok */ 600 m_set_rcvif(m0, ifp); 601 m0->m_pkthdr.len = amount; 602 603 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 604 rxs->rxs_dmamap->dm_mapsize, 605 BUS_DMASYNC_PREREAD); 606 607 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 608 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 609 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 610 enet_rx_csum(sc, ifp, m0, idx); 611 612 if_percpuq_enqueue(ifp->if_percpuq, m0); 613 } 614 615 m0 = NULL; 616 mprev = NULL; 617 amount = 0; 618 619 } else { 620 /* continued from previous buffer */ 621 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 622 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 623 } 624 625 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 626 if (enet_alloc_rxbuf(sc, idx) != 0) { 627 panic("enet_alloc_rxbuf NULL\n"); 628 } 629 } 630 sc->sc_rx_readidx = idx; 631 632 /* re-enable RX DMA to make sure */ 633 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 634 635 return 1; 636 } 637 638 static void 639 enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx) 640 { 641 uint32_t flags2; 642 uint8_t proto; 643 644 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 645 646 if (flags2 & RXFLAGS2_IPV6) { 647 proto = sc->sc_rxdesc_ring[idx].rx_proto; 648 649 /* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */ 650 if ((proto == IPPROTO_TCP) && 651 (ifp->if_csum_flags_rx & M_CSUM_TCPv6)) 652 m->m_pkthdr.csum_flags |= M_CSUM_TCPv6; 653 else if ((proto == 
IPPROTO_UDP) && 654 (ifp->if_csum_flags_rx & M_CSUM_UDPv6)) 655 m->m_pkthdr.csum_flags |= M_CSUM_UDPv6; 656 else 657 return; 658 659 /* IPv6 protocol checksum error */ 660 if (flags2 & RXFLAGS2_PCR) 661 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 662 663 } else { 664 struct ether_header *eh; 665 uint8_t *ip; 666 667 eh = mtod(m, struct ether_header *); 668 669 /* XXX: is an IPv4? */ 670 if (ntohs(eh->ether_type) != ETHERTYPE_IP) 671 return; 672 ip = (uint8_t *)(eh + 1); 673 if ((ip[0] & 0xf0) != 0x40) 674 return; 675 676 proto = sc->sc_rxdesc_ring[idx].rx_proto; 677 if (flags2 & RXFLAGS2_ICE) { 678 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 679 m->m_pkthdr.csum_flags |= 680 M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 681 } 682 } else { 683 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 684 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 685 } 686 687 /* 688 * PCR is valid when 689 * ICE == 0 and FRAG == 0 690 */ 691 if (flags2 & RXFLAGS2_FRAG) 692 return; 693 694 /* 695 * PCR is valid when proto is TCP or UDP 696 */ 697 if ((proto == IPPROTO_TCP) && 698 (ifp->if_csum_flags_rx & M_CSUM_TCPv4)) 699 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 700 else if ((proto == IPPROTO_UDP) && 701 (ifp->if_csum_flags_rx & M_CSUM_UDPv4)) 702 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 703 else 704 return; 705 706 /* IPv4 protocol checksum error */ 707 if (flags2 & RXFLAGS2_PCR) 708 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 709 } 710 } 711 } 712 713 static void 714 enet_setmulti(struct enet_softc *sc) 715 { 716 struct ethercom *ec = &sc->sc_ethercom; 717 struct ifnet *ifp = &ec->ec_if; 718 struct ether_multi *enm; 719 struct ether_multistep step; 720 int promisc; 721 uint32_t crc; 722 uint32_t gaddr[2]; 723 724 promisc = 0; 725 if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 0) { 726 ifp->if_flags |= IFF_ALLMULTI; 727 if (ifp->if_flags & IFF_PROMISC) 728 promisc = 1; 729 gaddr[0] = gaddr[1] = 0xffffffff; 730 } else { 731 gaddr[0] = gaddr[1] = 0; 732 733 ETHER_LOCK(ec); 734 ETHER_FIRST_MULTI(step, ec, enm); 735 while (enm != NULL) { 736 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 737 gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 738 ETHER_NEXT_MULTI(step, enm); 739 } 740 ETHER_UNLOCK(ec); 741 } 742 743 ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]); 744 ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]); 745 746 if (promisc) { 747 /* match all packets */ 748 ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff); 749 ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff); 750 } else { 751 /* don't match any packets */ 752 ENET_REG_WRITE(sc, ENET_IAUR, 0); 753 ENET_REG_WRITE(sc, ENET_IALR, 0); 754 } 755 } 756 757 static void 758 enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 759 { 760 uint32_t paddr; 761 762 paddr = ENET_REG_READ(sc, ENET_PALR); 763 hwaddr[0] = paddr >> 24; 764 hwaddr[1] = paddr >> 16; 765 hwaddr[2] = paddr >> 8; 766 hwaddr[3] = paddr; 767 768 paddr = ENET_REG_READ(sc, ENET_PAUR); 769 hwaddr[4] = paddr >> 24; 770 hwaddr[5] = paddr >> 16; 771 } 772 773 static void 774 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 775 { 776 uint32_t paddr; 777 778 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 779 hwaddr[3]; 780 ENET_REG_WRITE(sc, ENET_PALR, paddr); 781 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 782 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 783 } 784 785 /* 786 * ifnet interfaces 787 */ 788 static int 789 enet_init(struct ifnet *ifp) 790 { 791 struct enet_softc *sc; 792 int s, error; 793 794 sc = ifp->if_softc; 795 796 s = splnet(); 797 798 enet_init_regs(sc, 0); 799 enet_init_txring(sc); 800 error =
enet_init_rxring(sc); 801 if (error != 0) { 802 enet_drain_rxbuf(sc); 803 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 804 goto init_failure; 805 } 806 807 /* reload mac address */ 808 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 809 enet_sethwaddr(sc, sc->sc_enaddr); 810 811 /* program multicast address */ 812 enet_setmulti(sc); 813 814 /* update if_flags */ 815 ifp->if_flags |= IFF_RUNNING; 816 ifp->if_flags &= ~IFF_OACTIVE; 817 818 /* update local copy of if_flags */ 819 sc->sc_if_flags = ifp->if_flags; 820 821 /* mii */ 822 mii_mediachg(&sc->sc_mii); 823 824 /* enable RX DMA */ 825 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 826 827 sc->sc_stopping = false; 828 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 829 830 init_failure: 831 splx(s); 832 833 return error; 834 } 835 836 static void 837 enet_start(struct ifnet *ifp) 838 { 839 struct enet_softc *sc; 840 struct mbuf *m; 841 int npkt; 842 843 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 844 return; 845 846 sc = ifp->if_softc; 847 for (npkt = 0; ; npkt++) { 848 IFQ_POLL(&ifp->if_snd, m); 849 if (m == NULL) 850 break; 851 852 if (sc->sc_tx_free <= 0) { 853 /* no tx descriptor now... */ 854 ifp->if_flags |= IFF_OACTIVE; 855 DEVICE_DPRINTF("TX descriptor is full\n"); 856 break; 857 } 858 859 IFQ_DEQUEUE(&ifp->if_snd, m); 860 861 if (enet_encap_txring(sc, &m) != 0) { 862 /* too many mbuf chains? */ 863 ifp->if_flags |= IFF_OACTIVE; 864 DEVICE_DPRINTF( 865 "TX descriptor is full. dropping packet\n"); 866 m_freem(m); 867 ifp->if_oerrors++; 868 break; 869 } 870 871 /* Pass the packet to any BPF listeners */ 872 bpf_mtap(ifp, m, BPF_D_OUT); 873 } 874 875 if (npkt) { 876 /* enable TX DMA */ 877 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 878 879 ifp->if_timer = 5; 880 } 881 } 882 883 static void 884 enet_stop(struct ifnet *ifp, int disable) 885 { 886 struct enet_softc *sc; 887 int s; 888 uint32_t v; 889 890 sc = ifp->if_softc; 891 892 s = splnet(); 893 894 sc->sc_stopping = true; 895 callout_stop(&sc->sc_tick_ch); 896 897 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 898 v = ENET_REG_READ(sc, ENET_ECR); 899 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 900 901 /* Mark the interface as down and cancel the watchdog timer. 
*/ 902 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 903 ifp->if_timer = 0; 904 905 if (disable) { 906 enet_drain_txbuf(sc); 907 enet_drain_rxbuf(sc); 908 } 909 910 splx(s); 911 } 912 913 static void 914 enet_watchdog(struct ifnet *ifp) 915 { 916 struct enet_softc *sc; 917 int s; 918 919 sc = ifp->if_softc; 920 s = splnet(); 921 922 device_printf(sc->sc_dev, "watchdog timeout\n"); 923 ifp->if_oerrors++; 924 925 /* salvage packets left in descriptors */ 926 enet_tx_intr(sc); 927 enet_rx_intr(sc); 928 929 /* reset */ 930 enet_stop(ifp, 1); 931 enet_init(ifp); 932 933 splx(s); 934 } 935 936 static void 937 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 938 { 939 struct enet_softc *sc = ifp->if_softc; 940 941 ether_mediastatus(ifp, ifmr); 942 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 943 | sc->sc_flowflags; 944 } 945 946 static int 947 enet_ifflags_cb(struct ethercom *ec) 948 { 949 struct ifnet *ifp = &ec->ec_if; 950 struct enet_softc *sc = ifp->if_softc; 951 int change = ifp->if_flags ^ sc->sc_if_flags; 952 953 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 954 return ENETRESET; 955 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 956 return 0; 957 958 enet_setmulti(sc); 959 960 sc->sc_if_flags = ifp->if_flags; 961 return 0; 962 } 963 964 static int 965 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 966 { 967 struct enet_softc *sc; 968 struct ifreq *ifr; 969 int s, error; 970 uint32_t v; 971 972 sc = ifp->if_softc; 973 ifr = data; 974 975 error = 0; 976 977 s = splnet(); 978 979 switch (command) { 980 case SIOCSIFMTU: 981 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 982 error = EINVAL; 983 } else { 984 ifp->if_mtu = ifr->ifr_mtu; 985 986 /* set maximum frame length */ 987 v = MTU2FRAMESIZE(ifr->ifr_mtu); 988 ENET_REG_WRITE(sc, ENET_FTRL, v); 989 v = ENET_REG_READ(sc, ENET_RCR); 990 v &= ~ENET_RCR_MAX_FL(0x3fff); 991 v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 992 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 993 ENET_REG_WRITE(sc, ENET_RCR, v); 994 } 995 break; 996 case SIOCSIFMEDIA: 997 /* Flow control requires full-duplex mode. */ 998 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 999 (ifr->ifr_media & IFM_FDX) == 0) 1000 ifr->ifr_media &= ~IFM_ETH_FMASK; 1001 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1002 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1003 /* We can do both TXPAUSE and RXPAUSE. 
*/ 1004 ifr->ifr_media |= 1005 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1006 } 1007 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1008 } 1009 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1010 break; 1011 default: 1012 error = ether_ioctl(ifp, command, data); 1013 if (error != ENETRESET) 1014 break; 1015 1016 /* post-process */ 1017 error = 0; 1018 switch (command) { 1019 case SIOCSIFCAP: 1020 error = (*ifp->if_init)(ifp); 1021 break; 1022 case SIOCADDMULTI: 1023 case SIOCDELMULTI: 1024 if (ifp->if_flags & IFF_RUNNING) 1025 enet_setmulti(sc); 1026 break; 1027 } 1028 break; 1029 } 1030 1031 splx(s); 1032 1033 return error; 1034 } 1035 1036 /* 1037 * for MII 1038 */ 1039 static int 1040 enet_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1041 { 1042 struct enet_softc *sc; 1043 int timeout; 1044 uint32_t status; 1045 1046 sc = device_private(dev); 1047 1048 /* clear MII update */ 1049 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1050 1051 /* read command */ 1052 ENET_REG_WRITE(sc, ENET_MMFR, 1053 ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA | 1054 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy)); 1055 1056 /* check MII update */ 1057 for (timeout = 5000; timeout > 0; --timeout) { 1058 status = ENET_REG_READ(sc, ENET_EIR); 1059 if (status & ENET_EIR_MII) 1060 break; 1061 } 1062 if (timeout <= 0) { 1063 DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n", 1064 reg); 1065 return ETIMEDOUT; 1066 } else 1067 *val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK; 1068 1069 return 0; 1070 } 1071 1072 static int 1073 enet_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1074 { 1075 struct enet_softc *sc; 1076 int timeout; 1077 1078 sc = device_private(dev); 1079 1080 /* clear MII update */ 1081 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1082 1083 /* write command */ 1084 ENET_REG_WRITE(sc, ENET_MMFR, 1085 ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA | 1086 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) | 1087 (ENET_MMFR_DATAMASK & val)); 1088 1089 /* check MII update */ 1090 for (timeout = 5000; timeout > 0; --timeout) { 1091 if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII) 1092 break; 1093 } 1094 if (timeout <= 0) { 1095 DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", reg); 1096 return ETIMEDOUT; 1097 } 1098 1099 return 0; 1100 } 1101 1102 static void 1103 enet_miibus_statchg(struct ifnet *ifp) 1104 { 1105 struct enet_softc *sc; 1106 struct mii_data *mii; 1107 struct ifmedia_entry *ife; 1108 uint32_t ecr, ecr0; 1109 uint32_t rcr, rcr0; 1110 uint32_t tcr, tcr0; 1111 1112 sc = ifp->if_softc; 1113 mii = &sc->sc_mii; 1114 ife = mii->mii_media.ifm_cur; 1115 1116 /* get current status */ 1117 ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET; 1118 rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR); 1119 tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR); 1120 1121 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1122 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 1123 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1124 mii->mii_media_active &= ~IFM_ETH_FMASK; 1125 } 1126 1127 if ((ife->ifm_media & IFM_FDX) != 0) { 1128 tcr |= ENET_TCR_FDEN; /* full duplex */ 1129 rcr &= ~ENET_RCR_DRT;; /* enable receive on transmit */ 1130 } else { 1131 tcr &= ~ENET_TCR_FDEN; /* half duplex */ 1132 rcr |= ENET_RCR_DRT; /* disable receive on transmit */ 1133 } 1134 1135 if ((tcr ^ tcr0) & ENET_TCR_FDEN) { 1136 /* 1137 * need to reset because 1138 * FDEN can change when ECR[ETHEREN] is 0 1139 */ 1140 enet_init_regs(sc, 0); 1141 return; 
1142 } 1143 1144 switch (IFM_SUBTYPE(ife->ifm_media)) { 1145 case IFM_AUTO: 1146 case IFM_1000_T: 1147 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1148 rcr &= ~ENET_RCR_RMII_10T; 1149 break; 1150 case IFM_100_TX: 1151 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1152 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1153 break; 1154 case IFM_10_T: 1155 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1156 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1157 break; 1158 default: 1159 ecr = ecr0; 1160 rcr = rcr0; 1161 tcr = tcr0; 1162 break; 1163 } 1164 1165 if (sc->sc_rgmii == 0) 1166 ecr &= ~ENET_ECR_SPEED; 1167 1168 if (sc->sc_flowflags & IFM_FLOW) 1169 rcr |= ENET_RCR_FCE; 1170 else 1171 rcr &= ~ENET_RCR_FCE; 1172 1173 /* update registers if need change */ 1174 if (ecr != ecr0) 1175 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1176 if (rcr != rcr0) 1177 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1178 if (tcr != tcr0) 1179 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1180 } 1181 1182 /* 1183 * handling descriptors 1184 */ 1185 static void 1186 enet_init_txring(struct enet_softc *sc) 1187 { 1188 int i; 1189 1190 /* build TX ring */ 1191 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1192 sc->sc_txdesc_ring[i].tx_flags1_len = 1193 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1194 sc->sc_txdesc_ring[i].tx_databuf = 0; 1195 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1196 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1197 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1198 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1199 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1200 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1201 1202 TXDESC_WRITEOUT(i); 1203 } 1204 1205 sc->sc_tx_free = ENET_TX_RING_CNT; 1206 sc->sc_tx_considx = 0; 1207 sc->sc_tx_prodidx = 0; 1208 } 1209 1210 static int 1211 enet_init_rxring(struct enet_softc *sc) 1212 { 1213 int i, error; 1214 1215 /* build RX ring */ 1216 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1217 error = enet_alloc_rxbuf(sc, i); 1218 if (error != 0) 1219 return error; 1220 } 1221 1222 sc->sc_rx_readidx = 0; 1223 1224 return 0; 1225 } 1226 1227 static int 1228 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1229 { 1230 struct mbuf *m; 1231 int error; 1232 1233 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1234 1235 /* free mbuf if already allocated */ 1236 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1237 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1238 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1239 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1240 } 1241 1242 /* allocate new mbuf cluster */ 1243 MGETHDR(m, M_DONTWAIT, MT_DATA); 1244 if (m == NULL) 1245 return ENOBUFS; 1246 MCLGET(m, M_DONTWAIT); 1247 if (!(m->m_flags & M_EXT)) { 1248 m_freem(m); 1249 return ENOBUFS; 1250 } 1251 m->m_len = MCLBYTES; 1252 m->m_next = NULL; 1253 1254 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1255 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1256 BUS_DMA_READ | BUS_DMA_NOWAIT); 1257 if (error) { 1258 m_freem(m); 1259 return error; 1260 } 1261 1262 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1263 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1264 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1265 1266 sc->sc_rxsoft[idx].rxs_mbuf = m; 1267 enet_reset_rxdesc(sc, idx); 1268 return 0; 1269 } 1270 1271 static void 1272 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1273 { 1274 uint32_t paddr; 1275 1276 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1277 1278 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1279 RXFLAGS1_E | 1280 ((idx == (ENET_RX_RING_CNT - 1)) ? 
RXFLAGS1_W : 0); 1281 sc->sc_rxdesc_ring[idx].rx_databuf = paddr; 1282 sc->sc_rxdesc_ring[idx].rx_flags2 = 1283 RXFLAGS2_INT; 1284 sc->sc_rxdesc_ring[idx].rx_hl = 0; 1285 sc->sc_rxdesc_ring[idx].rx_proto = 0; 1286 sc->sc_rxdesc_ring[idx].rx_cksum = 0; 1287 sc->sc_rxdesc_ring[idx].rx_flags3 = 0; 1288 sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0; 1289 sc->sc_rxdesc_ring[idx].rx__reserved2 = 0; 1290 sc->sc_rxdesc_ring[idx].rx__reserved3 = 0; 1291 1292 RXDESC_WRITEOUT(idx); 1293 } 1294 1295 static void 1296 enet_drain_txbuf(struct enet_softc *sc) 1297 { 1298 int idx; 1299 struct enet_txsoft *txs; 1300 struct ifnet *ifp; 1301 1302 ifp = &sc->sc_ethercom.ec_if; 1303 1304 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 1305 idx = ENET_TX_NEXTIDX(idx)) { 1306 1307 /* txsoft[] is used only for the first segment */ 1308 txs = &sc->sc_txsoft[idx]; 1309 TXDESC_READIN(idx); 1310 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 1311 sc->sc_txdesc_ring[idx].tx_flags1_len = 0; 1312 bus_dmamap_unload(sc->sc_dmat, 1313 txs->txs_dmamap); 1314 m_freem(txs->txs_mbuf); 1315 1316 ifp->if_oerrors++; 1317 } 1318 sc->sc_tx_free++; 1319 } 1320 } 1321 1322 static void 1323 enet_drain_rxbuf(struct enet_softc *sc) 1324 { 1325 int i; 1326 1327 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1328 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) { 1329 sc->sc_rxdesc_ring[i].rx_flags1_len = 0; 1330 bus_dmamap_unload(sc->sc_dmat, 1331 sc->sc_rxsoft[i].rxs_dmamap); 1332 m_freem(sc->sc_rxsoft[i].rxs_mbuf); 1333 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1334 } 1335 } 1336 } 1337 1338 static int 1339 enet_alloc_ring(struct enet_softc *sc) 1340 { 1341 int i, error; 1342 1343 /* 1344 * build DMA maps for TX. 1345 * a TX packet may be a chain of mbufs, 1346 * so allow up to ENET_MAX_PKT_NSEGS segments per dmamap. 1347 */ 1348 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1349 error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN, 1350 ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT, 1351 &sc->sc_txsoft[i].txs_dmamap); 1352 1353 if (error) { 1354 aprint_error_dev(sc->sc_dev, 1355 "can't create DMA map for TX descs\n"); 1356 goto fail_1; 1357 } 1358 } 1359 1360 /* 1361 * build DMA maps for RX. 1362 * each RX descriptor holds a single mbuf cluster, 1363 * so one segment per dmamap is enough.
1364 */ 1365 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1366 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1367 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1368 &sc->sc_rxsoft[i].rxs_dmamap); 1369 if (error) { 1370 aprint_error_dev(sc->sc_dev, 1371 "can't create DMA map for RX descs\n"); 1372 goto fail_2; 1373 } 1374 } 1375 1376 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1377 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1378 return -1; 1379 memset(sc->sc_txdesc_ring, 0, 1380 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1381 1382 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1383 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1384 return -1; 1385 memset(sc->sc_rxdesc_ring, 0, 1386 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1387 1388 return 0; 1389 1390 fail_2: 1391 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1392 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1393 bus_dmamap_destroy(sc->sc_dmat, 1394 sc->sc_rxsoft[i].rxs_dmamap); 1395 } 1396 fail_1: 1397 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1398 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1399 bus_dmamap_destroy(sc->sc_dmat, 1400 sc->sc_txsoft[i].txs_dmamap); 1401 } 1402 return error; 1403 } 1404 1405 static int 1406 enet_encap_mbufalign(struct mbuf **mp) 1407 { 1408 struct mbuf *m, *m0, *mt, *p, *x; 1409 void *ap; 1410 uint32_t alignoff, chiplen; 1411 1412 /* 1413 * iMX6 SoC ethernet controller requires 1414 * address of buffer must aligned 8, and 1415 * length of buffer must be greater than 10 (first fragment only?) 1416 */ 1417 #define ALIGNBYTE 8 1418 #define MINBUFSIZE 10 1419 #define ALIGN_PTR(p, align) \ 1420 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1421 1422 m0 = *mp; 1423 mt = p = NULL; 1424 for (m = m0; m != NULL; m = m->m_next) { 1425 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1426 if (m->m_len < (ALIGNBYTE * 2)) { 1427 /* 1428 * rearrange mbuf data aligned 1429 * 1430 * align 8 * * * * * 1431 * +0123456789abcdef0123456789abcdef0 1432 * FROM m->m_data[___________abcdefghijklmn_______] 1433 * 1434 * +0123456789abcdef0123456789abcdef0 1435 * TO m->m_data[________abcdefghijklm___________] or 1436 * m->m_data[________________abcdefghijklmn__] 1437 */ 1438 if ((alignoff != 0) && (m->m_len != 0)) { 1439 chiplen = ALIGNBYTE - alignoff; 1440 if (M_LEADINGSPACE(m) >= alignoff) { 1441 ap = m->m_data - alignoff; 1442 memmove(ap, m->m_data, m->m_len); 1443 m->m_data = ap; 1444 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1445 ap = m->m_data + chiplen; 1446 memmove(ap, m->m_data, m->m_len); 1447 m->m_data = ap; 1448 } else { 1449 /* 1450 * no space to align data. (M_READONLY?) 1451 * allocate new mbuf aligned, 1452 * and copy to it. 
1453 */ 1454 MGET(x, M_DONTWAIT, m->m_type); 1455 if (x == NULL) { 1456 m_freem(m); 1457 return ENOBUFS; 1458 } 1459 MCLAIM(x, m->m_owner); 1460 if (m->m_flags & M_PKTHDR) 1461 m_move_pkthdr(x, m); 1462 x->m_len = m->m_len; 1463 x->m_data = ALIGN_PTR(x->m_data, 1464 ALIGNBYTE); 1465 memcpy(mtod(x, void *), mtod(m, void *), 1466 m->m_len); 1467 p->m_next = x; 1468 x->m_next = m_free(m); 1469 m = x; 1470 } 1471 } 1472 1473 /* 1474 * fill 1st mbuf at least 10byte 1475 * 1476 * align 8 * * * * * 1477 * +0123456789abcdef0123456789abcdef0 1478 * FROM m->m_data[________abcde___________________] 1479 * m->m_data[__fg____________________________] 1480 * m->m_data[_________________hi_____________] 1481 * m->m_data[__________jk____________________] 1482 * m->m_data[____l___________________________] 1483 * 1484 * +0123456789abcdef0123456789abcdef0 1485 * TO m->m_data[________abcdefghij______________] 1486 * m->m_data[________________________________] 1487 * m->m_data[________________________________] 1488 * m->m_data[___________k____________________] 1489 * m->m_data[____l___________________________] 1490 */ 1491 if (mt == NULL) { 1492 mt = m; 1493 while (mt->m_len == 0) { 1494 mt = mt->m_next; 1495 if (mt == NULL) { 1496 m_freem(m); 1497 return ENOBUFS; 1498 } 1499 } 1500 1501 /* mt = 1st mbuf, x = 2nd mbuf */ 1502 x = mt->m_next; 1503 while (mt->m_len < MINBUFSIZE) { 1504 if (x == NULL) { 1505 m_freem(m); 1506 return ENOBUFS; 1507 } 1508 1509 alignoff = (uintptr_t)x->m_data & 1510 (ALIGNBYTE - 1); 1511 chiplen = ALIGNBYTE - alignoff; 1512 if (chiplen > x->m_len) { 1513 chiplen = x->m_len; 1514 } else if ((mt->m_len + chiplen) < 1515 MINBUFSIZE) { 1516 /* 1517 * next mbuf should be greater 1518 * than ALIGNBYTE? 1519 */ 1520 if (x->m_len >= (chiplen + 1521 ALIGNBYTE * 2)) 1522 chiplen += ALIGNBYTE; 1523 else 1524 chiplen = x->m_len; 1525 } 1526 1527 if (chiplen && 1528 (M_TRAILINGSPACE(mt) < chiplen)) { 1529 /* 1530 * move data to the begining of 1531 * m_dat[] (aligned) to en- 1532 * large trailingspace 1533 */ 1534 if (mt->m_flags & M_EXT) { 1535 ap = mt->m_ext.ext_buf; 1536 } else if (mt->m_flags & 1537 M_PKTHDR) { 1538 ap = mt->m_pktdat; 1539 } else { 1540 ap = mt->m_dat; 1541 } 1542 ap = ALIGN_PTR(ap, ALIGNBYTE); 1543 memcpy(ap, mt->m_data, 1544 mt->m_len); 1545 mt->m_data = ap; 1546 } 1547 1548 if (chiplen && 1549 (M_TRAILINGSPACE(mt) >= chiplen)) { 1550 memcpy(mt->m_data + mt->m_len, 1551 x->m_data, chiplen); 1552 mt->m_len += chiplen; 1553 m_adj(x, chiplen); 1554 } 1555 1556 x = x->m_next; 1557 } 1558 } 1559 1560 } else { 1561 mt = m; 1562 1563 /* 1564 * allocate new mbuf x, and rearrange as below; 1565 * 1566 * align 8 * * * * * 1567 * +0123456789abcdef0123456789abcdef0 1568 * FROM m->m_data[____________abcdefghijklmnopq___] 1569 * 1570 * +0123456789abcdef0123456789abcdef0 1571 * TO x->m_data[________abcdefghijkl____________] 1572 * m->m_data[________________________mnopq___] 1573 * 1574 */ 1575 if (alignoff != 0) { 1576 /* at least ALIGNBYTE */ 1577 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1578 1579 MGET(x, M_DONTWAIT, m->m_type); 1580 if (x == NULL) { 1581 m_freem(m); 1582 return ENOBUFS; 1583 } 1584 MCLAIM(x, m->m_owner); 1585 if (m->m_flags & M_PKTHDR) 1586 m_move_pkthdr(x, m); 1587 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1588 memcpy(mtod(x, void *), mtod(m, void *), 1589 chiplen); 1590 x->m_len = chiplen; 1591 x->m_next = m; 1592 m_adj(m, chiplen); 1593 1594 if (p == NULL) 1595 m0 = x; 1596 else 1597 p->m_next = x; 1598 } 1599 } 1600 p = m; 1601 } 1602 *mp = m0; 1603 1604 return 
0; 1605 } 1606 1607 static int 1608 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1609 { 1610 bus_dmamap_t map; 1611 struct mbuf *m; 1612 int csumflags, idx, i, error; 1613 uint32_t flags1, flags2; 1614 1615 idx = sc->sc_tx_prodidx; 1616 map = sc->sc_txsoft[idx].txs_dmamap; 1617 1618 /* align mbuf data for claim of ENET */ 1619 error = enet_encap_mbufalign(mp); 1620 if (error != 0) 1621 return error; 1622 1623 m = *mp; 1624 csumflags = m->m_pkthdr.csum_flags; 1625 1626 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1627 BUS_DMA_NOWAIT); 1628 if (error != 0) { 1629 device_printf(sc->sc_dev, 1630 "Error mapping mbuf into TX chain: error=%d\n", error); 1631 m_freem(m); 1632 return error; 1633 } 1634 1635 if (map->dm_nsegs > sc->sc_tx_free) { 1636 bus_dmamap_unload(sc->sc_dmat, map); 1637 device_printf(sc->sc_dev, 1638 "too many mbuf chain %d\n", map->dm_nsegs); 1639 m_freem(m); 1640 return ENOBUFS; 1641 } 1642 1643 /* fill protocol cksum zero beforehand */ 1644 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1645 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1646 int ehlen; 1647 uint16_t etype; 1648 1649 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1650 switch (ntohs(etype)) { 1651 case ETHERTYPE_IP: 1652 case ETHERTYPE_IPV6: 1653 ehlen = ETHER_HDR_LEN; 1654 break; 1655 case ETHERTYPE_VLAN: 1656 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1657 break; 1658 default: 1659 ehlen = 0; 1660 break; 1661 } 1662 1663 if (ehlen) { 1664 const int off = 1665 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1666 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data); 1667 if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) { 1668 uint16_t zero = 0; 1669 m_copyback(m, ehlen + off, sizeof(zero), &zero); 1670 } 1671 } 1672 } 1673 1674 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1675 BUS_DMASYNC_PREWRITE); 1676 1677 for (i = 0; i < map->dm_nsegs; i++) { 1678 flags1 = TXFLAGS1_R; 1679 flags2 = 0; 1680 1681 if (i == 0) { 1682 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1683 sc->sc_txsoft[idx].txs_mbuf = m; 1684 } 1685 1686 /* checksum offloading */ 1687 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1688 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1689 flags2 |= TXFLAGS2_PINS; 1690 if (csumflags & (M_CSUM_IPv4)) 1691 flags2 |= TXFLAGS2_IINS; 1692 1693 if (i == map->dm_nsegs - 1) { 1694 /* mark last segment */ 1695 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1696 flags2 |= TXFLAGS2_INT; 1697 } 1698 if (idx == ENET_TX_RING_CNT - 1) { 1699 /* mark end of ring */ 1700 flags1 |= TXFLAGS1_W; 1701 } 1702 1703 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1704 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1705 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1706 TXDESC_WRITEOUT(idx); 1707 1708 sc->sc_txdesc_ring[idx].tx_flags1_len = 1709 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1710 TXDESC_WRITEOUT(idx); 1711 1712 idx = ENET_TX_NEXTIDX(idx); 1713 sc->sc_tx_free--; 1714 } 1715 1716 sc->sc_tx_prodidx = idx; 1717 1718 return 0; 1719 } 1720 1721 /* 1722 * device initialize 1723 */ 1724 static int 1725 enet_init_regs(struct enet_softc *sc, int init) 1726 { 1727 struct mii_data *mii; 1728 struct ifmedia_entry *ife; 1729 paddr_t paddr; 1730 uint32_t val; 1731 int miimode, fulldup, ecr_speed, rcr_speed, flowctrl; 1732 1733 if (init) { 1734 fulldup = 1; 1735 ecr_speed = ENET_ECR_SPEED; 1736 rcr_speed = 0; 1737 flowctrl = 0; 1738 } else { 1739 mii = &sc->sc_mii; 1740 ife = mii->mii_media.ifm_cur; 1741 1742 if ((ife->ifm_media & IFM_FDX) != 0) 1743 fulldup = 1; 1744 else 1745 fulldup = 0; 1746 1747 switch 
(IFM_SUBTYPE(ife->ifm_media)) { 1748 case IFM_10_T: 1749 ecr_speed = 0; 1750 rcr_speed = ENET_RCR_RMII_10T; 1751 break; 1752 case IFM_100_TX: 1753 ecr_speed = 0; 1754 rcr_speed = 0; 1755 break; 1756 default: 1757 ecr_speed = ENET_ECR_SPEED; 1758 rcr_speed = 0; 1759 break; 1760 } 1761 1762 flowctrl = sc->sc_flowflags & IFM_FLOW; 1763 } 1764 1765 if (sc->sc_rgmii == 0) 1766 ecr_speed = 0; 1767 1768 /* reset */ 1769 ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET); 1770 1771 /* mask and clear all interrupts */ 1772 ENET_REG_WRITE(sc, ENET_EIMR, 0); 1773 ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff); 1774 1775 /* full duplex */ 1776 ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0); 1777 1778 /* clear and enable MIB register */ 1779 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 1780 ENET_REG_WRITE(sc, ENET_MIBC, 0); 1781 1782 /* MII speed setup. MDCclk(=2.5MHz) = ENET_PLL/((val+1)*2) */ 1783 val = ((sc->sc_pllclock) / 500000 - 1) / 10; 1784 ENET_REG_WRITE(sc, ENET_MSCR, val << 1); 1785 1786 /* Opcode/Pause Duration */ 1787 ENET_REG_WRITE(sc, ENET_OPD, 0x00010020); 1788 1789 /* Receive FIFO */ 1790 ENET_REG_WRITE(sc, ENET_RSFL, 16); /* RxFIFO Section Full */ 1791 ENET_REG_WRITE(sc, ENET_RSEM, 0x84); /* RxFIFO Section Empty */ 1792 ENET_REG_WRITE(sc, ENET_RAEM, 8); /* RxFIFO Almost Empty */ 1793 ENET_REG_WRITE(sc, ENET_RAFL, 8); /* RxFIFO Almost Full */ 1794 1795 /* Transmit FIFO */ 1796 ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD | 1797 ENET_TFWR_FIFO(128)); /* TxFIFO Watermark */ 1798 ENET_REG_WRITE(sc, ENET_TSEM, 0); /* TxFIFO Section Empty */ 1799 ENET_REG_WRITE(sc, ENET_TAEM, 256); /* TxFIFO Almost Empty */ 1800 ENET_REG_WRITE(sc, ENET_TAFL, 8); /* TxFIFO Almost Full */ 1801 ENET_REG_WRITE(sc, ENET_TIPG, 12); /* Tx Inter-Packet Gap */ 1802 1803 /* hardware checksum is default off (override in TX descriptor) */ 1804 ENET_REG_WRITE(sc, ENET_TACC, 0); 1805 1806 /* 1807 * align the ethernet payload on a 32bit boundary, discard frames with 1808 * MAC layer errors, but don't discard frames with checksum errors 1809 */ 1810 ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS); 1811 1812 /* maximum frame size */ 1813 val = ENET_DEFAULT_PKT_LEN; 1814 ENET_REG_WRITE(sc, ENET_FTRL, val); /* Frame Truncation Length */ 1815 1816 if (sc->sc_rgmii == 0) 1817 miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE; 1818 else 1819 miimode = ENET_RCR_RGMII_EN; 1820 ENET_REG_WRITE(sc, ENET_RCR, 1821 ENET_RCR_PADEN | /* RX frame padding remove */ 1822 miimode | 1823 (flowctrl ? ENET_RCR_FCE : 0) | /* flow control enable */ 1824 rcr_speed | 1825 (fulldup ?
0 : ENET_RCR_DRT) | 1826 ENET_RCR_MAX_FL(val)); 1827 1828 /* Maximum Receive BufSize per one descriptor */ 1829 ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE); 1830 1831 1832 /* TX/RX Descriptor Physical Address */ 1833 paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr; 1834 ENET_REG_WRITE(sc, ENET_TDSR, paddr); 1835 paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr; 1836 ENET_REG_WRITE(sc, ENET_RDSR, paddr); 1837 /* sync cache */ 1838 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0, 1839 sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1840 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0, 1841 sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1842 1843 /* enable interrupts */ 1844 val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR; 1845 if (sc->sc_imxtype == 7) 1846 val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 | ENET_EIR_TXF1 | 1847 ENET_EIR_RXF1; 1848 ENET_REG_WRITE(sc, ENET_EIMR, val); 1849 1850 /* enable ether */ 1851 ENET_REG_WRITE(sc, ENET_ECR, 1852 #if _BYTE_ORDER == _LITTLE_ENDIAN 1853 ENET_ECR_DBSWP | 1854 #endif 1855 ecr_speed | 1856 ENET_ECR_EN1588 | /* use enhanced TX/RX descriptor */ 1857 ENET_ECR_ETHEREN); /* Ethernet Enable */ 1858 1859 return 0; 1860 } 1861 1862 static int 1863 enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp, 1864 bus_dmamap_t *mapp) 1865 { 1866 bus_dma_segment_t seglist[1]; 1867 int nsegs, error; 1868 1869 if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist, 1870 1, &nsegs, M_NOWAIT)) != 0) { 1871 device_printf(sc->sc_dev, 1872 "unable to allocate DMA buffer, error=%d\n", error); 1873 goto fail_alloc; 1874 } 1875 1876 if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp, 1877 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 1878 device_printf(sc->sc_dev, 1879 "unable to map DMA buffer, error=%d\n", 1880 error); 1881 goto fail_map; 1882 } 1883 1884 if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1885 BUS_DMA_NOWAIT, mapp)) != 0) { 1886 device_printf(sc->sc_dev, 1887 "unable to create DMA map, error=%d\n", error); 1888 goto fail_create; 1889 } 1890 1891 if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL, 1892 BUS_DMA_NOWAIT)) != 0) { 1893 aprint_error_dev(sc->sc_dev, 1894 "unable to load DMA map, error=%d\n", error); 1895 goto fail_load; 1896 } 1897 1898 return 0; 1899 1900 fail_load: 1901 bus_dmamap_destroy(sc->sc_dmat, *mapp); 1902 fail_create: 1903 bus_dmamem_unmap(sc->sc_dmat, *addrp, size); 1904 fail_map: 1905 bus_dmamem_free(sc->sc_dmat, seglist, 1); 1906 fail_alloc: 1907 return error; 1908 } 1909
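/*
 * Worked example of the MDC divisor that enet_init_regs() programs into
 * ENET_MSCR, assuming sc_pllclock is 66 MHz (a common i.MX6 module clock;
 * the actual value is supplied by the attaching bus frontend):
 *
 *   val  = (66000000 / 500000 - 1) / 10 = (132 - 1) / 10 = 13
 *   MSCR = val << 1 = 26                  (MII_SPEED field = 13)
 *   MDC  = 66000000 / ((13 + 1) * 2)      ~= 2.36 MHz, below the 2.5 MHz limit
 */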