1 /* $NetBSD: if_enet.c,v 1.4 2015/04/27 17:34:51 christos Exp $ */ 2 3 /* 4 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * i.MX6 10/100/1000-Mbps ethernet MAC (ENET) 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.4 2015/04/27 17:34:51 christos Exp $"); 35 36 #include "imxocotp.h" 37 #include "imxccm.h" 38 #include "vlan.h" 39 40 #include <sys/param.h> 41 #include <sys/bus.h> 42 #include <sys/mbuf.h> 43 #include <sys/device.h> 44 #include <sys/sockio.h> 45 #include <sys/kernel.h> 46 #include <sys/rndsource.h> 47 48 #include <lib/libkern/libkern.h> 49 50 #include <net/if.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/if_ether.h> 54 #include <net/bpf.h> 55 #include <net/if_vlanvar.h> 56 57 #include <netinet/in.h> 58 #include <netinet/in_systm.h> 59 #include <netinet/ip.h> 60 61 #include <dev/mii/mii.h> 62 #include <dev/mii/miivar.h> 63 64 #include <arm/imx/imx6var.h> 65 #include <arm/imx/imx6_reg.h> 66 #include <arm/imx/imx6_ocotpreg.h> 67 #include <arm/imx/imx6_ocotpvar.h> 68 #include <arm/imx/imx6_ccmreg.h> 69 #include <arm/imx/imx6_ccmvar.h> 70 #include <arm/imx/if_enetreg.h> 71 #include "locators.h" 72 73 #undef DEBUG_ENET 74 #undef ENET_EVENT_COUNTER 75 76 #ifdef DEBUG_ENET 77 int enet_debug = 0; 78 # define DEVICE_DPRINTF(args...) \ 79 do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0) 80 #else 81 # define DEVICE_DPRINTF(args...) 82 #endif 83 84 85 #define RXDESC_MAXBUFSIZE 0x07f0 86 /* iMX6 ENET not work greather than 0x0800... 
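(buffer sizes above 0x0800 do not work reliably, so RXDESC_MAXBUFSIZE stays a little below 2 KiB; a frame larger than one buffer simply spans several RX descriptors and is reassembled into an mbuf chain by enet_rx_intr())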
*/ 87 88 #undef ENET_SUPPORT_JUMBO /* JUMBO FRAME SUPPORT is unstable */ 89 #ifdef ENET_SUPPORT_JUMBO 90 # define ENET_MAX_PKT_LEN 4034 /* MAX FIFO LEN */ 91 #else 92 # define ENET_MAX_PKT_LEN 1522 93 #endif 94 #define ENET_DEFAULT_PKT_LEN 1522 /* including VLAN tag */ 95 #define MTU2FRAMESIZE(n) \ 96 ((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) 97 98 99 #define ENET_MAX_PKT_NSEGS 64 100 #define ENET_TX_RING_CNT 256 /* must be 2^n */ 101 #define ENET_RX_RING_CNT 256 /* must be 2^n */ 102 103 #define ENET_TX_NEXTIDX(idx) (((idx) + 1) & (ENET_TX_RING_CNT - 1)) 104 #define ENET_RX_NEXTIDX(idx) (((idx) + 1) & (ENET_RX_RING_CNT - 1)) 105 106 struct enet_txsoft { 107 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 108 bus_dmamap_t txs_dmamap; /* our DMA map */ 109 }; 110 111 struct enet_rxsoft { 112 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 113 bus_dmamap_t rxs_dmamap; /* our DMA map */ 114 }; 115 116 struct enet_softc { 117 device_t sc_dev; 118 119 bus_addr_t sc_addr; 120 bus_space_tag_t sc_iot; 121 bus_space_handle_t sc_ioh; 122 bus_dma_tag_t sc_dmat; 123 124 /* interrupts */ 125 void *sc_ih; 126 callout_t sc_tick_ch; 127 bool sc_stopping; 128 129 /* TX */ 130 struct enet_txdesc *sc_txdesc_ring; /* [ENET_TX_RING_CNT] */ 131 bus_dmamap_t sc_txdesc_dmamap; 132 struct enet_rxdesc *sc_rxdesc_ring; /* [ENET_RX_RING_CNT] */ 133 bus_dmamap_t sc_rxdesc_dmamap; 134 struct enet_txsoft sc_txsoft[ENET_TX_RING_CNT]; 135 int sc_tx_considx; 136 int sc_tx_prodidx; 137 int sc_tx_free; 138 139 /* RX */ 140 struct enet_rxsoft sc_rxsoft[ENET_RX_RING_CNT]; 141 int sc_rx_readidx; 142 143 /* misc */ 144 int sc_if_flags; /* local copy of if_flags */ 145 int sc_flowflags; /* 802.3x flow control flags */ 146 struct ethercom sc_ethercom; /* interface info */ 147 struct mii_data sc_mii; 148 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 149 krndsource_t sc_rnd_source; 150 151 #ifdef ENET_EVENT_COUNTER 152 struct evcnt sc_ev_t_drop; 153 struct evcnt sc_ev_t_packets; 154 struct evcnt sc_ev_t_bc_pkt; 155 struct evcnt sc_ev_t_mc_pkt; 156 struct evcnt sc_ev_t_crc_align; 157 struct evcnt sc_ev_t_undersize; 158 struct evcnt sc_ev_t_oversize; 159 struct evcnt sc_ev_t_frag; 160 struct evcnt sc_ev_t_jab; 161 struct evcnt sc_ev_t_col; 162 struct evcnt sc_ev_t_p64; 163 struct evcnt sc_ev_t_p65to127n; 164 struct evcnt sc_ev_t_p128to255n; 165 struct evcnt sc_ev_t_p256to511; 166 struct evcnt sc_ev_t_p512to1023; 167 struct evcnt sc_ev_t_p1024to2047; 168 struct evcnt sc_ev_t_p_gte2048; 169 struct evcnt sc_ev_t_octets; 170 struct evcnt sc_ev_r_packets; 171 struct evcnt sc_ev_r_bc_pkt; 172 struct evcnt sc_ev_r_mc_pkt; 173 struct evcnt sc_ev_r_crc_align; 174 struct evcnt sc_ev_r_undersize; 175 struct evcnt sc_ev_r_oversize; 176 struct evcnt sc_ev_r_frag; 177 struct evcnt sc_ev_r_jab; 178 struct evcnt sc_ev_r_p64; 179 struct evcnt sc_ev_r_p65to127; 180 struct evcnt sc_ev_r_p128to255; 181 struct evcnt sc_ev_r_p256to511; 182 struct evcnt sc_ev_r_p512to1023; 183 struct evcnt sc_ev_r_p1024to2047; 184 struct evcnt sc_ev_r_p_gte2048; 185 struct evcnt sc_ev_r_octets; 186 #endif /* ENET_EVENT_COUNTER */ 187 }; 188 189 #define TXDESC_WRITEOUT(idx) \ 190 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 191 sizeof(struct enet_txdesc) * (idx), \ 192 sizeof(struct enet_txdesc), \ 193 BUS_DMASYNC_PREWRITE) 194 195 #define TXDESC_READIN(idx) \ 196 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 197 sizeof(struct enet_txdesc) * (idx), \ 198 sizeof(struct enet_txdesc), \ 199 BUS_DMASYNC_PREREAD) 200 201 #define RXDESC_WRITEOUT(idx) 
\ 202 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 203 sizeof(struct enet_rxdesc) * (idx), \ 204 sizeof(struct enet_rxdesc), \ 205 BUS_DMASYNC_PREWRITE) 206 207 #define RXDESC_READIN(idx) \ 208 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 209 sizeof(struct enet_rxdesc) * (idx), \ 210 sizeof(struct enet_rxdesc), \ 211 BUS_DMASYNC_PREREAD) 212 213 #define ENET_REG_READ(sc, reg) \ 214 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 215 216 #define ENET_REG_WRITE(sc, reg, value) \ 217 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 218 219 static int enet_match(device_t, struct cfdata *, void *); 220 static void enet_attach(device_t, device_t, void *); 221 #ifdef ENET_EVENT_COUNTER 222 static void enet_attach_evcnt(struct enet_softc *); 223 static void enet_update_evcnt(struct enet_softc *); 224 #endif 225 226 static int enet_intr(void *); 227 static void enet_tick(void *); 228 static int enet_tx_intr(void *); 229 static int enet_rx_intr(void *); 230 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 231 int); 232 233 static void enet_start(struct ifnet *); 234 static int enet_ifflags_cb(struct ethercom *); 235 static int enet_ioctl(struct ifnet *, u_long, void *); 236 static int enet_init(struct ifnet *); 237 static void enet_stop(struct ifnet *, int); 238 static void enet_watchdog(struct ifnet *); 239 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 240 241 static int enet_miibus_readreg(device_t, int, int); 242 static void enet_miibus_writereg(device_t, int, int, int); 243 static void enet_miibus_statchg(struct ifnet *); 244 245 static void enet_ocotp_getmacaddr(uint8_t *); 246 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 247 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 248 static void enet_setmulti(struct enet_softc *); 249 static int enet_encap_mbufalign(struct mbuf **); 250 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 251 static int enet_init_plls(struct enet_softc *); 252 static int enet_init_regs(struct enet_softc *, int); 253 static int enet_alloc_ring(struct enet_softc *); 254 static void enet_init_txring(struct enet_softc *); 255 static int enet_init_rxring(struct enet_softc *); 256 static void enet_reset_rxdesc(struct enet_softc *, int); 257 static int enet_alloc_rxbuf(struct enet_softc *, int); 258 static void enet_drain_txbuf(struct enet_softc *); 259 static void enet_drain_rxbuf(struct enet_softc *); 260 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 261 bus_dmamap_t *); 262 263 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 264 enet_match, enet_attach, NULL, NULL); 265 266 /* ARGSUSED */ 267 static int 268 enet_match(device_t parent __unused, struct cfdata *match __unused, void *aux) 269 { 270 struct axi_attach_args *aa; 271 272 aa = aux; 273 274 switch (aa->aa_addr) { 275 case (IMX6_AIPS2_BASE + AIPS2_ENET_BASE): 276 return 1; 277 } 278 279 return 0; 280 } 281 282 /* ARGSUSED */ 283 static void 284 enet_attach(device_t parent __unused, device_t self, void *aux) 285 { 286 struct enet_softc *sc; 287 struct axi_attach_args *aa; 288 struct ifnet *ifp; 289 290 aa = aux; 291 sc = device_private(self); 292 sc->sc_dev = self; 293 sc->sc_iot = aa->aa_iot; 294 sc->sc_addr = aa->aa_addr; 295 sc->sc_dmat = aa->aa_dmat; 296 297 if (aa->aa_size == AXICF_SIZE_DEFAULT) 298 aa->aa_size = AIPS2_ENET_SIZE; 299 300 aprint_naive("\n"); 301 aprint_normal(": Gigabit Ethernet Controller\n"); 302 if (bus_space_map(sc->sc_iot, sc->sc_addr, aa->aa_size, 0, 303 
&sc->sc_ioh)) { 304 aprint_error_dev(self, "cannot map registers\n"); 305 return; 306 } 307 308 /* allocate dma buffer */ 309 if (enet_alloc_ring(sc)) 310 return; 311 312 #define IS_ENADDR_ZERO(enaddr) \ 313 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 314 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 315 316 /* get mac-address from SoC eFuse */ 317 enet_ocotp_getmacaddr(sc->sc_enaddr); 318 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 319 /* by any chance, mac-address is already set by bootloader? */ 320 enet_gethwaddr(sc, sc->sc_enaddr); 321 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 322 /* give up. set randomly */ 323 uint32_t addr = random(); 324 /* not multicast */ 325 sc->sc_enaddr[0] = (addr >> 24) & 0xfc; 326 sc->sc_enaddr[1] = addr >> 16; 327 sc->sc_enaddr[2] = addr >> 8; 328 sc->sc_enaddr[3] = addr; 329 addr = random(); 330 sc->sc_enaddr[4] = addr >> 8; 331 sc->sc_enaddr[5] = addr; 332 333 aprint_error_dev(self, 334 "cannot get mac address. set randomly\n"); 335 } 336 } 337 enet_sethwaddr(sc, sc->sc_enaddr); 338 339 aprint_normal_dev(self, "Ethernet address %s\n", 340 ether_sprintf(sc->sc_enaddr)); 341 342 /* power up and init */ 343 if (enet_init_plls(sc) != 0) 344 goto failure; 345 enet_init_regs(sc, 1); 346 347 /* setup interrupt handlers */ 348 if ((sc->sc_ih = intr_establish(aa->aa_irq, IPL_NET, 349 IST_LEVEL, enet_intr, sc)) == NULL) { 350 aprint_error_dev(self, "unable to establish interrupt\n"); 351 goto failure; 352 } 353 354 /* callout will be scheduled from enet_init() */ 355 callout_init(&sc->sc_tick_ch, 0); 356 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 357 358 /* setup ifp */ 359 ifp = &sc->sc_ethercom.ec_if; 360 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 361 ifp->if_softc = sc; 362 ifp->if_mtu = ETHERMTU; 363 ifp->if_baudrate = IF_Gbps(1); 364 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 365 ifp->if_ioctl = enet_ioctl; 366 ifp->if_start = enet_start; 367 ifp->if_init = enet_init; 368 ifp->if_stop = enet_stop; 369 ifp->if_watchdog = enet_watchdog; 370 371 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 372 #ifdef ENET_SUPPORT_JUMBO 373 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 374 #endif 375 376 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 377 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 378 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 379 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 380 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 381 382 IFQ_SET_MAXLEN(&ifp->if_snd, max(ENET_TX_RING_CNT, IFQ_MAXLEN)); 383 IFQ_SET_READY(&ifp->if_snd); 384 385 /* setup MII */ 386 sc->sc_ethercom.ec_mii = &sc->sc_mii; 387 sc->sc_mii.mii_ifp = ifp; 388 sc->sc_mii.mii_readreg = enet_miibus_readreg; 389 sc->sc_mii.mii_writereg = enet_miibus_writereg; 390 sc->sc_mii.mii_statchg = enet_miibus_statchg; 391 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 392 enet_mediastatus); 393 394 /* try to attach PHY */ 395 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 396 MII_OFFSET_ANY, 0); 397 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 398 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 399 0, NULL); 400 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 401 } else { 402 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 403 } 404 405 if_attach(ifp); 406 ether_ifattach(ifp, sc->sc_enaddr); 407 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 408 409 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 410 RND_TYPE_NET, RND_FLAG_DEFAULT); 411 412 #ifdef ENET_EVENT_COUNTER 413 
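	/* export the hardware MIB/RMON statistics as event counters; they are accumulated by enet_update_evcnt() on every tick */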
enet_attach_evcnt(sc); 414 #endif 415 416 sc->sc_stopping = false; 417 418 return; 419 420 failure: 421 bus_space_unmap(sc->sc_iot, sc->sc_ioh, aa->aa_size); 422 return; 423 } 424 425 #ifdef ENET_EVENT_COUNTER 426 static void 427 enet_attach_evcnt(struct enet_softc *sc) 428 { 429 const char *xname; 430 431 xname = device_xname(sc->sc_dev); 432 433 #define ENET_EVCNT_ATTACH(name) \ 434 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 435 NULL, xname, #name); 436 437 ENET_EVCNT_ATTACH(t_drop); 438 ENET_EVCNT_ATTACH(t_packets); 439 ENET_EVCNT_ATTACH(t_bc_pkt); 440 ENET_EVCNT_ATTACH(t_mc_pkt); 441 ENET_EVCNT_ATTACH(t_crc_align); 442 ENET_EVCNT_ATTACH(t_undersize); 443 ENET_EVCNT_ATTACH(t_oversize); 444 ENET_EVCNT_ATTACH(t_frag); 445 ENET_EVCNT_ATTACH(t_jab); 446 ENET_EVCNT_ATTACH(t_col); 447 ENET_EVCNT_ATTACH(t_p64); 448 ENET_EVCNT_ATTACH(t_p65to127n); 449 ENET_EVCNT_ATTACH(t_p128to255n); 450 ENET_EVCNT_ATTACH(t_p256to511); 451 ENET_EVCNT_ATTACH(t_p512to1023); 452 ENET_EVCNT_ATTACH(t_p1024to2047); 453 ENET_EVCNT_ATTACH(t_p_gte2048); 454 ENET_EVCNT_ATTACH(t_octets); 455 ENET_EVCNT_ATTACH(r_packets); 456 ENET_EVCNT_ATTACH(r_bc_pkt); 457 ENET_EVCNT_ATTACH(r_mc_pkt); 458 ENET_EVCNT_ATTACH(r_crc_align); 459 ENET_EVCNT_ATTACH(r_undersize); 460 ENET_EVCNT_ATTACH(r_oversize); 461 ENET_EVCNT_ATTACH(r_frag); 462 ENET_EVCNT_ATTACH(r_jab); 463 ENET_EVCNT_ATTACH(r_p64); 464 ENET_EVCNT_ATTACH(r_p65to127); 465 ENET_EVCNT_ATTACH(r_p128to255); 466 ENET_EVCNT_ATTACH(r_p256to511); 467 ENET_EVCNT_ATTACH(r_p512to1023); 468 ENET_EVCNT_ATTACH(r_p1024to2047); 469 ENET_EVCNT_ATTACH(r_p_gte2048); 470 ENET_EVCNT_ATTACH(r_octets); 471 } 472 473 static void 474 enet_update_evcnt(struct enet_softc *sc) 475 { 476 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 477 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 478 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 479 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 480 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 481 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 482 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 483 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 484 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 485 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 486 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 487 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 488 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 489 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 490 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 491 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 492 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 493 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 494 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 495 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 496 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 497 sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 498 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 499 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 500 
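	/* the RMON registers are cleared through ENET_MIBC after each pass of enet_tick(), so each call here only adds the delta accumulated since the previous tick */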
sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 501 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 502 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 503 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 504 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 505 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 506 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 507 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 508 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 509 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 510 } 511 #endif /* ENET_EVENT_COUNTER */ 512 513 static void 514 enet_tick(void *arg) 515 { 516 struct enet_softc *sc; 517 struct mii_data *mii; 518 struct ifnet *ifp; 519 int s; 520 521 sc = arg; 522 mii = &sc->sc_mii; 523 ifp = &sc->sc_ethercom.ec_if; 524 525 s = splnet(); 526 527 if (sc->sc_stopping) 528 goto out; 529 530 531 #ifdef ENET_EVENT_COUNTER 532 enet_update_evcnt(sc); 533 #endif 534 535 /* update counters */ 536 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 537 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 538 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 539 540 /* clear counters */ 541 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 542 ENET_REG_WRITE(sc, ENET_MIBC, 0); 543 544 mii_tick(mii); 545 out: 546 547 if (!sc->sc_stopping) 548 callout_schedule(&sc->sc_tick_ch, hz); 549 550 splx(s); 551 } 552 553 static int 554 enet_intr(void *arg) 555 { 556 struct enet_softc *sc; 557 struct ifnet *ifp; 558 uint32_t status; 559 560 sc = arg; 561 status = ENET_REG_READ(sc, ENET_EIR); 562 563 if (status & ENET_EIR_TXF) 564 enet_tx_intr(arg); 565 566 if (status & ENET_EIR_RXF) 567 enet_rx_intr(arg); 568 569 if (status & ENET_EIR_EBERR) { 570 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 571 ifp = &sc->sc_ethercom.ec_if; 572 enet_stop(ifp, 1); 573 enet_init(ifp); 574 } else { 575 ENET_REG_WRITE(sc, ENET_EIR, status); 576 } 577 578 rnd_add_uint32(&sc->sc_rnd_source, status); 579 580 return 1; 581 } 582 583 static int 584 enet_tx_intr(void *arg) 585 { 586 struct enet_softc *sc; 587 struct ifnet *ifp; 588 struct enet_txsoft *txs; 589 int idx; 590 591 sc = (struct enet_softc *)arg; 592 ifp = &sc->sc_ethercom.ec_if; 593 594 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 595 idx = ENET_TX_NEXTIDX(idx)) { 596 597 txs = &sc->sc_txsoft[idx]; 598 599 TXDESC_READIN(idx); 600 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 601 /* This TX Descriptor has not been transmitted yet */ 602 break; 603 } 604 605 /* txsoft is available on first segment (TXFLAGS1_T1) */ 606 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 607 bus_dmamap_unload(sc->sc_dmat, 608 txs->txs_dmamap); 609 m_freem(txs->txs_mbuf); 610 ifp->if_opackets++; 611 } 612 613 /* checking error */ 614 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 615 uint32_t flags2; 616 617 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 618 619 if (flags2 & (TXFLAGS2_TXE | 620 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 621 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 622 #ifdef DEBUG_ENET 623 if (enet_debug) { 624 char flagsbuf[128]; 625 626 snprintb(flagsbuf, sizeof(flagsbuf), 627 "\20" "\20TRANSMIT" "\16UNDERFLOW" 628 "\15COLLISION" "\14FRAME" 629 "\13LATECOLLISION" "\12OVERFLOW", 630 flags2); 631 632 device_printf(sc->sc_dev, 633 
"txdesc[%d]: transmit error: " 634 "flags2=%s\n", idx, flagsbuf); 635 } 636 #endif /* DEBUG_ENET */ 637 ifp->if_oerrors++; 638 } 639 } 640 641 sc->sc_tx_free++; 642 } 643 sc->sc_tx_considx = idx; 644 645 if (sc->sc_tx_free > 0) 646 ifp->if_flags &= ~IFF_OACTIVE; 647 648 /* 649 * No more pending TX descriptor, 650 * cancel the watchdog timer. 651 */ 652 if (sc->sc_tx_free == ENET_TX_RING_CNT) 653 ifp->if_timer = 0; 654 655 return 1; 656 } 657 658 static int 659 enet_rx_intr(void *arg) 660 { 661 struct enet_softc *sc; 662 struct ifnet *ifp; 663 struct enet_rxsoft *rxs; 664 int idx, len, amount; 665 uint32_t flags1, flags2; 666 struct mbuf *m, *m0, *mprev; 667 668 sc = arg; 669 ifp = &sc->sc_ethercom.ec_if; 670 671 m0 = mprev = NULL; 672 amount = 0; 673 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 674 675 rxs = &sc->sc_rxsoft[idx]; 676 677 RXDESC_READIN(idx); 678 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 679 /* This RX Descriptor has not been received yet */ 680 break; 681 } 682 683 /* 684 * build mbuf from RX Descriptor if needed 685 */ 686 m = rxs->rxs_mbuf; 687 rxs->rxs_mbuf = NULL; 688 689 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 690 len = RXFLAGS1_LEN(flags1); 691 692 #define RACC_SHIFT16 2 693 if (m0 == NULL) { 694 m0 = m; 695 m_adj(m0, RACC_SHIFT16); 696 len -= RACC_SHIFT16; 697 m->m_len = len; 698 amount = len; 699 } else { 700 if (flags1 & RXFLAGS1_L) 701 len = len - amount - RACC_SHIFT16; 702 703 m->m_len = len; 704 amount += len; 705 m->m_flags &= ~M_PKTHDR; 706 mprev->m_next = m; 707 } 708 mprev = m; 709 710 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 711 712 if (flags1 & RXFLAGS1_L) { 713 /* last buffer */ 714 if ((amount < ETHER_HDR_LEN) || 715 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 716 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 717 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 718 RXFLAGS2_CE)))) { 719 720 #ifdef DEBUG_ENET 721 if (enet_debug) { 722 char flags1buf[128], flags2buf[128]; 723 snprintb(flags1buf, sizeof(flags1buf), 724 "\20" "\31MISS" "\26LENGTHVIOLATION" 725 "\25NONOCTET" "\23CRC" "\22OVERRUN" 726 "\21TRUNCATED", flags1); 727 snprintb(flags2buf, sizeof(flags2buf), 728 "\20" "\40MAC" "\33PHY" 729 "\32COLLISION", flags2); 730 731 DEVICE_DPRINTF( 732 "rxdesc[%d]: receive error: " 733 "flags1=%s,flags2=%s,len=%d\n", 734 idx, flags1buf, flags2buf, amount); 735 } 736 #endif /* DEBUG_ENET */ 737 ifp->if_ierrors++; 738 m_freem(m0); 739 740 } else { 741 /* packet receive ok */ 742 ifp->if_ipackets++; 743 m0->m_pkthdr.rcvif = ifp; 744 m0->m_pkthdr.len = amount; 745 746 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 747 rxs->rxs_dmamap->dm_mapsize, 748 BUS_DMASYNC_PREREAD); 749 750 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 751 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 752 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 753 enet_rx_csum(sc, ifp, m0, idx); 754 755 /* Pass this up to any BPF listeners */ 756 bpf_mtap(ifp, m0); 757 758 (*ifp->if_input)(ifp, m0); 759 } 760 761 m0 = NULL; 762 mprev = NULL; 763 amount = 0; 764 765 } else { 766 /* continued from previous buffer */ 767 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 768 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 769 } 770 771 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 772 if (enet_alloc_rxbuf(sc, idx) != 0) { 773 panic("enet_alloc_rxbuf NULL\n"); 774 } 775 } 776 sc->sc_rx_readidx = idx; 777 778 /* re-enable RX DMA to make sure */ 779 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 780 781 return 1; 782 } 783 784 static void 785 enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct 
mbuf *m, int idx)
{
	uint32_t flags2;
	uint8_t proto;

	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

	if (flags2 & RXFLAGS2_IPV6) {
		proto = sc->sc_rxdesc_ring[idx].rx_proto;

		/* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */
		if ((proto == IPPROTO_TCP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		else if ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is this an IPv4 packet? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ifnet *ifp;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	ifp = &sc->sc_ethercom.ec_if;

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_ocotp_getmacaddr(uint8_t *macaddr)
{
#if NIMXOCOTP > 0
	uint32_t addr;

	addr = imxocotp_read(OCOTP_MAC1);
	macaddr[0] = addr >> 8;
	macaddr[1] = addr;

	addr = imxocotp_read(OCOTP_MAC0);
	macaddr[2] = addr >> 24;
	macaddr[3] = addr >> 16;
	macaddr[4] = addr >> 8;
	macaddr[5] = addr;
#endif
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0] = paddr >> 24;
	hwaddr[1] = paddr >> 16;
	hwaddr[2] = paddr >> 8;
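	/* ENET_PALR carries octets 0-3 of the station address (most significant octet first) and the upper 16 bits of ENET_PAUR carry octets 4-5; enet_sethwaddr() programs the same layout */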
929 hwaddr[3] = paddr; 930 931 paddr = ENET_REG_READ(sc, ENET_PAUR); 932 hwaddr[4] = paddr >> 24; 933 hwaddr[5] = paddr >> 16; 934 } 935 936 static void 937 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 938 { 939 uint32_t paddr; 940 941 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 942 hwaddr[3]; 943 ENET_REG_WRITE(sc, ENET_PALR, paddr); 944 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 945 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 946 } 947 948 /* 949 * ifnet interfaces 950 */ 951 static int 952 enet_init(struct ifnet *ifp) 953 { 954 struct enet_softc *sc; 955 int s, error; 956 957 sc = ifp->if_softc; 958 959 s = splnet(); 960 961 enet_init_regs(sc, 0); 962 enet_init_txring(sc); 963 error = enet_init_rxring(sc); 964 if (error != 0) { 965 enet_drain_rxbuf(sc); 966 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 967 goto init_failure; 968 } 969 970 /* reload mac address */ 971 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 972 enet_sethwaddr(sc, sc->sc_enaddr); 973 974 /* program multicast address */ 975 enet_setmulti(sc); 976 977 /* update if_flags */ 978 ifp->if_flags |= IFF_RUNNING; 979 ifp->if_flags &= ~IFF_OACTIVE; 980 981 /* update local copy of if_flags */ 982 sc->sc_if_flags = ifp->if_flags; 983 984 /* mii */ 985 mii_mediachg(&sc->sc_mii); 986 987 /* enable RX DMA */ 988 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 989 990 sc->sc_stopping = false; 991 callout_schedule(&sc->sc_tick_ch, hz); 992 993 init_failure: 994 splx(s); 995 996 return error; 997 } 998 999 static void 1000 enet_start(struct ifnet *ifp) 1001 { 1002 struct enet_softc *sc; 1003 struct mbuf *m; 1004 int npkt; 1005 1006 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1007 return; 1008 1009 sc = ifp->if_softc; 1010 for (npkt = 0; ; npkt++) { 1011 IFQ_POLL(&ifp->if_snd, m); 1012 if (m == NULL) 1013 break; 1014 1015 if (sc->sc_tx_free <= 0) { 1016 /* no tx descriptor now... */ 1017 ifp->if_flags |= IFF_OACTIVE; 1018 DEVICE_DPRINTF("TX descriptor is full\n"); 1019 break; 1020 } 1021 1022 IFQ_DEQUEUE(&ifp->if_snd, m); 1023 1024 if (enet_encap_txring(sc, &m) != 0) { 1025 /* too many mbuf chains? */ 1026 ifp->if_flags |= IFF_OACTIVE; 1027 DEVICE_DPRINTF( 1028 "TX descriptor is full. dropping packet\n"); 1029 m_freem(m); 1030 ifp->if_oerrors++; 1031 break; 1032 } 1033 1034 /* Pass the packet to any BPF listeners */ 1035 bpf_mtap(ifp, m); 1036 } 1037 1038 if (npkt) { 1039 /* enable TX DMA */ 1040 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 1041 1042 ifp->if_timer = 5; 1043 } 1044 } 1045 1046 static void 1047 enet_stop(struct ifnet *ifp, int disable) 1048 { 1049 struct enet_softc *sc; 1050 int s; 1051 uint32_t v; 1052 1053 sc = ifp->if_softc; 1054 1055 s = splnet(); 1056 1057 sc->sc_stopping = true; 1058 callout_stop(&sc->sc_tick_ch); 1059 1060 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 1061 v = ENET_REG_READ(sc, ENET_ECR); 1062 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 1063 1064 /* Mark the interface as down and cancel the watchdog timer. 
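The descriptor rings are left in place unless 'disable' is set, in which case the pending TX and RX buffers are drained below.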
*/ 1065 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1066 ifp->if_timer = 0; 1067 1068 if (disable) { 1069 enet_drain_txbuf(sc); 1070 enet_drain_rxbuf(sc); 1071 } 1072 1073 splx(s); 1074 } 1075 1076 static void 1077 enet_watchdog(struct ifnet *ifp) 1078 { 1079 struct enet_softc *sc; 1080 int s; 1081 1082 sc = ifp->if_softc; 1083 s = splnet(); 1084 1085 device_printf(sc->sc_dev, "watchdog timeout\n"); 1086 ifp->if_oerrors++; 1087 1088 /* salvage packets left in descriptors */ 1089 enet_tx_intr(sc); 1090 enet_rx_intr(sc); 1091 1092 /* reset */ 1093 enet_stop(ifp, 1); 1094 enet_init(ifp); 1095 1096 splx(s); 1097 } 1098 1099 static void 1100 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1101 { 1102 struct enet_softc *sc = ifp->if_softc; 1103 1104 ether_mediastatus(ifp, ifmr); 1105 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 1106 | sc->sc_flowflags; 1107 } 1108 1109 static int 1110 enet_ifflags_cb(struct ethercom *ec) 1111 { 1112 struct ifnet *ifp = &ec->ec_if; 1113 struct enet_softc *sc = ifp->if_softc; 1114 int change = ifp->if_flags ^ sc->sc_if_flags; 1115 1116 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1117 return ENETRESET; 1118 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 1119 return 0; 1120 1121 enet_setmulti(sc); 1122 1123 sc->sc_if_flags = ifp->if_flags; 1124 return 0; 1125 } 1126 1127 static int 1128 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 1129 { 1130 struct enet_softc *sc; 1131 struct ifreq *ifr; 1132 int s, error; 1133 uint32_t v; 1134 1135 sc = ifp->if_softc; 1136 ifr = data; 1137 1138 error = 0; 1139 1140 s = splnet(); 1141 1142 switch (command) { 1143 case SIOCSIFMTU: 1144 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 1145 error = EINVAL; 1146 } else { 1147 ifp->if_mtu = ifr->ifr_mtu; 1148 1149 /* set maximum frame length */ 1150 v = MTU2FRAMESIZE(ifr->ifr_mtu); 1151 ENET_REG_WRITE(sc, ENET_FTRL, v); 1152 v = ENET_REG_READ(sc, ENET_RCR); 1153 v &= ~ENET_RCR_MAX_FL(0x3fff); 1154 v |= ENET_RCR_MAX_FL(ifp->if_mtu + 1155 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1156 ENET_REG_WRITE(sc, ENET_RCR, v); 1157 } 1158 break; 1159 case SIOCSIFMEDIA: 1160 case SIOCGIFMEDIA: 1161 /* Flow control requires full-duplex mode. */ 1162 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1163 (ifr->ifr_media & IFM_FDX) == 0) 1164 ifr->ifr_media &= ~IFM_ETH_FMASK; 1165 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1166 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1167 /* We can do both TXPAUSE and RXPAUSE. 
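The requested flow-control setting is remembered in sc_flowflags and applied to the hardware as ENET_RCR_FCE by enet_miibus_statchg().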
*/ 1168 ifr->ifr_media |= 1169 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1170 } 1171 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1172 } 1173 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1174 break; 1175 default: 1176 error = ether_ioctl(ifp, command, data); 1177 if (error != ENETRESET) 1178 break; 1179 1180 /* post-process */ 1181 error = 0; 1182 switch (command) { 1183 case SIOCSIFCAP: 1184 error = (*ifp->if_init)(ifp); 1185 break; 1186 case SIOCADDMULTI: 1187 case SIOCDELMULTI: 1188 if (ifp->if_flags & IFF_RUNNING) 1189 enet_setmulti(sc); 1190 break; 1191 } 1192 break; 1193 } 1194 1195 splx(s); 1196 1197 return error; 1198 } 1199 1200 /* 1201 * for MII 1202 */ 1203 static int 1204 enet_miibus_readreg(device_t dev, int phy, int reg) 1205 { 1206 struct enet_softc *sc; 1207 int timeout; 1208 uint32_t val, status; 1209 1210 sc = device_private(dev); 1211 1212 /* clear MII update */ 1213 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1214 1215 /* read command */ 1216 ENET_REG_WRITE(sc, ENET_MMFR, 1217 ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA | 1218 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy)); 1219 1220 /* check MII update */ 1221 for (timeout = 5000; timeout > 0; --timeout) { 1222 status = ENET_REG_READ(sc, ENET_EIR); 1223 if (status & ENET_EIR_MII) 1224 break; 1225 } 1226 if (timeout <= 0) { 1227 DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n", 1228 reg); 1229 val = -1; 1230 } else { 1231 val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK; 1232 } 1233 1234 return val; 1235 } 1236 1237 static void 1238 enet_miibus_writereg(device_t dev, int phy, int reg, int val) 1239 { 1240 struct enet_softc *sc; 1241 int timeout; 1242 1243 sc = device_private(dev); 1244 1245 /* clear MII update */ 1246 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1247 1248 /* write command */ 1249 ENET_REG_WRITE(sc, ENET_MMFR, 1250 ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA | 1251 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) | 1252 (ENET_MMFR_DATAMASK & val)); 1253 1254 /* check MII update */ 1255 for (timeout = 5000; timeout > 0; --timeout) { 1256 if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII) 1257 break; 1258 } 1259 if (timeout <= 0) { 1260 DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", 1261 reg); 1262 } 1263 } 1264 1265 static void 1266 enet_miibus_statchg(struct ifnet *ifp) 1267 { 1268 struct enet_softc *sc; 1269 struct mii_data *mii; 1270 struct ifmedia_entry *ife; 1271 uint32_t ecr, ecr0; 1272 uint32_t rcr, rcr0; 1273 uint32_t tcr, tcr0; 1274 1275 sc = ifp->if_softc; 1276 mii = &sc->sc_mii; 1277 ife = mii->mii_media.ifm_cur; 1278 1279 /* get current status */ 1280 ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET; 1281 rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR); 1282 tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR); 1283 1284 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1285 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 1286 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1287 mii->mii_media_active &= ~IFM_ETH_FMASK; 1288 } 1289 1290 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 1291 tcr |= ENET_TCR_FDEN; /* full duplex */ 1292 rcr &= ~ENET_RCR_DRT;; /* enable receive on transmit */ 1293 } else { 1294 tcr &= ~ENET_TCR_FDEN; /* half duplex */ 1295 rcr |= ENET_RCR_DRT; /* disable receive on transmit */ 1296 } 1297 1298 if ((tcr ^ tcr0) & ENET_TCR_FDEN) { 1299 /* 1300 * need to reset because 1301 * FDEN can change when ECR[ETHEREN] is 0 1302 */ 1303 enet_init_regs(sc, 0); 1304 return; 1305 } 1306 1307 switch 
(IFM_SUBTYPE(ife->ifm_media)) { 1308 case IFM_AUTO: 1309 case IFM_1000_T: 1310 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1311 break; 1312 case IFM_100_TX: 1313 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1314 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1315 break; 1316 case IFM_10_T: 1317 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1318 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1319 break; 1320 default: 1321 ecr = ecr0; 1322 rcr = rcr0; 1323 tcr = tcr0; 1324 break; 1325 } 1326 1327 if (sc->sc_flowflags & IFM_FLOW) 1328 rcr |= ENET_RCR_FCE; 1329 else 1330 rcr &= ~ENET_RCR_FCE; 1331 1332 /* update registers if need change */ 1333 if (ecr != ecr0) 1334 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1335 if (rcr != rcr0) 1336 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1337 if (tcr != tcr0) 1338 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1339 } 1340 1341 /* 1342 * handling descriptors 1343 */ 1344 static void 1345 enet_init_txring(struct enet_softc *sc) 1346 { 1347 int i; 1348 1349 /* build TX ring */ 1350 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1351 sc->sc_txdesc_ring[i].tx_flags1_len = 1352 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1353 sc->sc_txdesc_ring[i].tx_databuf = 0; 1354 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1355 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1356 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1357 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1358 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1359 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1360 1361 TXDESC_WRITEOUT(i); 1362 } 1363 1364 sc->sc_tx_free = ENET_TX_RING_CNT; 1365 sc->sc_tx_considx = 0; 1366 sc->sc_tx_prodidx = 0; 1367 } 1368 1369 static int 1370 enet_init_rxring(struct enet_softc *sc) 1371 { 1372 int i, error; 1373 1374 /* build RX ring */ 1375 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1376 error = enet_alloc_rxbuf(sc, i); 1377 if (error != 0) 1378 return error; 1379 } 1380 1381 sc->sc_rx_readidx = 0; 1382 1383 return 0; 1384 } 1385 1386 static int 1387 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1388 { 1389 struct mbuf *m; 1390 int error; 1391 1392 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1393 1394 /* free mbuf if already allocated */ 1395 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1396 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1397 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1398 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1399 } 1400 1401 /* allocate new mbuf cluster */ 1402 MGETHDR(m, M_DONTWAIT, MT_DATA); 1403 if (m == NULL) 1404 return ENOBUFS; 1405 MCLGET(m, M_DONTWAIT); 1406 if (!(m->m_flags & M_EXT)) { 1407 m_freem(m); 1408 return ENOBUFS; 1409 } 1410 m->m_len = MCLBYTES; 1411 m->m_next = NULL; 1412 1413 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1414 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1415 BUS_DMA_READ | BUS_DMA_NOWAIT); 1416 if (error) { 1417 m_freem(m); 1418 return error; 1419 } 1420 1421 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1422 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1423 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1424 1425 sc->sc_rxsoft[idx].rxs_mbuf = m; 1426 enet_reset_rxdesc(sc, idx); 1427 return 0; 1428 } 1429 1430 static void 1431 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1432 { 1433 uint32_t paddr; 1434 1435 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1436 1437 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1438 RXFLAGS1_E | 1439 ((idx == (ENET_RX_RING_CNT - 1)) ? 
	    RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only for the first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			ifp->if_oerrors++;
		}
		sc->sc_tx_free++;
	}
}

static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * a TX descriptor chain may carry a whole mbuf chain,
	 * so allow up to ENET_MAX_PKT_NSEGS segments per map.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * each RX descriptor holds one mbuf cluster,
	 * so a single-segment map is sufficient.
	 */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_2;
		}
	}

	if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT,
	    (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_txdesc_ring, 0,
	    sizeof(struct enet_txdesc) * ENET_TX_RING_CNT);

	if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT,
	    (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_rxdesc_ring, 0,
	    sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT);

	return 0;

fail_2:
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_1:
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	return error;
}

static int
enet_encap_mbufalign(struct mbuf **mp)
{
	struct mbuf *m, *m0, *mt, *p, *x;
	void *ap;
	uint32_t alignoff, chiplen;

	/*
	 * the i.MX6 SoC ethernet controller requires that every buffer
	 * address be 8-byte aligned and that the buffer length be
	 * greater than 10 bytes (first fragment only?)
	 */
#define ALIGNBYTE	8
#define MINBUFSIZE	10
#define ALIGN_PTR(p, align)	\
	(void *)(((uintptr_t)(p) + ((align) - 1)) & -(align))

	m0 = *mp;
	mt = p = NULL;
	for (m = m0; m != NULL; m = m->m_next) {
		alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1);
		if (m->m_len < (ALIGNBYTE * 2)) {
			/*
			 * rearrange the mbuf data so that it is aligned
			 *
			 *       align 8 *       *       *       *       *
			 *              +0123456789abcdef0123456789abcdef0
			 * FROM m->m_data[___________abcdefghijklmn_______]
			 *
			 *              +0123456789abcdef0123456789abcdef0
			 * TO   m->m_data[________abcdefghijklm___________] or
			 *      m->m_data[________________abcdefghijklmn__]
			 */
			if ((alignoff != 0) && (m->m_len != 0)) {
				chiplen = ALIGNBYTE - alignoff;
				if (M_LEADINGSPACE(m) >= alignoff) {
					ap = m->m_data - alignoff;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else if (M_TRAILINGSPACE(m) >= chiplen) {
					ap = m->m_data + chiplen;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else {
					/*
					 * no space to align the data in place
					 * (M_READONLY?); allocate a new,
					 * aligned mbuf and copy to it.
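					 * typically this happens when the data
					 * sits in read-only or shared external
					 * storage, so neither M_LEADINGSPACE()
					 * nor M_TRAILINGSPACE() can provide
					 * room to shift it in place.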
1612 */ 1613 MGET(x, M_DONTWAIT, m->m_type); 1614 if (x == NULL) { 1615 m_freem(m); 1616 return ENOBUFS; 1617 } 1618 MCLAIM(x, m->m_owner); 1619 if (m->m_flags & M_PKTHDR) 1620 M_MOVE_PKTHDR(x, m); 1621 x->m_len = m->m_len; 1622 x->m_data = ALIGN_PTR(x->m_data, 1623 ALIGNBYTE); 1624 memcpy(mtod(x, void *), mtod(m, void *), 1625 m->m_len); 1626 p->m_next = x; 1627 MFREE(m, x->m_next); 1628 m = x; 1629 } 1630 } 1631 1632 /* 1633 * fill 1st mbuf at least 10byte 1634 * 1635 * align 8 * * * * * 1636 * +0123456789abcdef0123456789abcdef0 1637 * FROM m->m_data[________abcde___________________] 1638 * m->m_data[__fg____________________________] 1639 * m->m_data[_________________hi_____________] 1640 * m->m_data[__________jk____________________] 1641 * m->m_data[____l___________________________] 1642 * 1643 * +0123456789abcdef0123456789abcdef0 1644 * TO m->m_data[________abcdefghij______________] 1645 * m->m_data[________________________________] 1646 * m->m_data[________________________________] 1647 * m->m_data[___________k____________________] 1648 * m->m_data[____l___________________________] 1649 */ 1650 if (mt == NULL) { 1651 mt = m; 1652 while (mt->m_len == 0) { 1653 mt = mt->m_next; 1654 if (mt == NULL) { 1655 m_freem(m); 1656 return ENOBUFS; 1657 } 1658 } 1659 1660 /* mt = 1st mbuf, x = 2nd mbuf */ 1661 x = mt->m_next; 1662 while (mt->m_len < MINBUFSIZE) { 1663 if (x == NULL) { 1664 m_freem(m); 1665 return ENOBUFS; 1666 } 1667 1668 alignoff = (uintptr_t)x->m_data & 1669 (ALIGNBYTE - 1); 1670 chiplen = ALIGNBYTE - alignoff; 1671 if (chiplen > x->m_len) { 1672 chiplen = x->m_len; 1673 } else if ((mt->m_len + chiplen) < 1674 MINBUFSIZE) { 1675 /* 1676 * next mbuf should be greater 1677 * than ALIGNBYTE? 1678 */ 1679 if (x->m_len >= (chiplen + 1680 ALIGNBYTE * 2)) 1681 chiplen += ALIGNBYTE; 1682 else 1683 chiplen = x->m_len; 1684 } 1685 1686 if (chiplen && 1687 (M_TRAILINGSPACE(mt) < chiplen)) { 1688 /* 1689 * move data to the begining of 1690 * m_dat[] (aligned) to en- 1691 * large trailingspace 1692 */ 1693 if (mt->m_flags & M_EXT) { 1694 ap = mt->m_ext.ext_buf; 1695 } else if (mt->m_flags & 1696 M_PKTHDR) { 1697 ap = mt->m_pktdat; 1698 } else { 1699 ap = mt->m_dat; 1700 } 1701 ap = ALIGN_PTR(ap, ALIGNBYTE); 1702 memcpy(ap, mt->m_data, mt->m_len); 1703 mt->m_data = ap; 1704 } 1705 1706 if (chiplen && 1707 (M_TRAILINGSPACE(mt) >= chiplen)) { 1708 memcpy(mt->m_data + mt->m_len, 1709 x->m_data, chiplen); 1710 mt->m_len += chiplen; 1711 m_adj(x, chiplen); 1712 } 1713 1714 x = x->m_next; 1715 } 1716 } 1717 1718 } else { 1719 mt = m; 1720 1721 /* 1722 * allocate new mbuf x, and rearrange as below; 1723 * 1724 * align 8 * * * * * 1725 * +0123456789abcdef0123456789abcdef0 1726 * FROM m->m_data[____________abcdefghijklmnopq___] 1727 * 1728 * +0123456789abcdef0123456789abcdef0 1729 * TO x->m_data[________abcdefghijkl____________] 1730 * m->m_data[________________________mnopq___] 1731 * 1732 */ 1733 if (alignoff != 0) { 1734 /* at least ALIGNBYTE */ 1735 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1736 1737 MGET(x, M_DONTWAIT, m->m_type); 1738 if (x == NULL) { 1739 m_freem(m); 1740 return ENOBUFS; 1741 } 1742 MCLAIM(x, m->m_owner); 1743 if (m->m_flags & M_PKTHDR) 1744 M_MOVE_PKTHDR(x, m); 1745 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1746 memcpy(mtod(x, void *), mtod(m, void *), 1747 chiplen); 1748 x->m_len = chiplen; 1749 x->m_next = m; 1750 m_adj(m, chiplen); 1751 1752 if (p == NULL) 1753 m0 = x; 1754 else 1755 p->m_next = x; 1756 } 1757 } 1758 p = m; 1759 } 1760 *mp = m0; 1761 1762 return 0; 
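	/* on success every segment of the chain now starts on an ALIGNBYTE (8-byte) boundary and the first segment holds at least MINBUFSIZE bytes, which enet_encap_txring() relies on before loading the DMA map */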
1763 } 1764 1765 static int 1766 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1767 { 1768 bus_dmamap_t map; 1769 struct mbuf *m; 1770 int csumflags, idx, i, error; 1771 uint32_t flags1, flags2; 1772 1773 idx = sc->sc_tx_prodidx; 1774 map = sc->sc_txsoft[idx].txs_dmamap; 1775 1776 /* align mbuf data for claim of ENET */ 1777 error = enet_encap_mbufalign(mp); 1778 if (error != 0) 1779 return error; 1780 1781 m = *mp; 1782 csumflags = m->m_pkthdr.csum_flags; 1783 1784 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1785 BUS_DMA_NOWAIT); 1786 if (error != 0) { 1787 device_printf(sc->sc_dev, 1788 "Error mapping mbuf into TX chain: error=%d\n", error); 1789 m_freem(m); 1790 return error; 1791 } 1792 1793 if (map->dm_nsegs > sc->sc_tx_free) { 1794 bus_dmamap_unload(sc->sc_dmat, map); 1795 device_printf(sc->sc_dev, 1796 "too many mbuf chain %d\n", map->dm_nsegs); 1797 m_freem(m); 1798 return ENOBUFS; 1799 } 1800 1801 /* fill protocol cksum zero beforehand */ 1802 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1803 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1804 struct mbuf *m1; 1805 int ehlen, moff; 1806 uint16_t etype; 1807 1808 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1809 switch (ntohs(etype)) { 1810 case ETHERTYPE_IP: 1811 case ETHERTYPE_IPV6: 1812 ehlen = ETHER_HDR_LEN; 1813 break; 1814 case ETHERTYPE_VLAN: 1815 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1816 break; 1817 default: 1818 ehlen = 0; 1819 break; 1820 } 1821 1822 if (ehlen) { 1823 m1 = m_getptr(m, ehlen + 1824 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1825 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data), 1826 &moff); 1827 if (m1 != NULL) 1828 *(uint16_t *)(mtod(m1, char *) + moff) = 0; 1829 } 1830 } 1831 1832 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1833 BUS_DMASYNC_PREWRITE); 1834 1835 for (i = 0; i < map->dm_nsegs; i++) { 1836 flags1 = TXFLAGS1_R; 1837 flags2 = 0; 1838 1839 if (i == 0) { 1840 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1841 sc->sc_txsoft[idx].txs_mbuf = m; 1842 } 1843 1844 /* checksum offloading */ 1845 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1846 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1847 flags2 |= TXFLAGS2_PINS; 1848 if (csumflags & (M_CSUM_IPv4)) 1849 flags2 |= TXFLAGS2_IINS; 1850 1851 if (i == map->dm_nsegs - 1) { 1852 /* mark last segment */ 1853 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1854 flags2 |= TXFLAGS2_INT; 1855 } 1856 if (idx == ENET_TX_RING_CNT - 1) { 1857 /* mark end of ring */ 1858 flags1 |= TXFLAGS1_W; 1859 } 1860 1861 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1862 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1863 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1864 sc->sc_txdesc_ring[idx].tx_flags1_len = 1865 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1866 1867 TXDESC_WRITEOUT(idx); 1868 1869 idx = ENET_TX_NEXTIDX(idx); 1870 sc->sc_tx_free--; 1871 } 1872 1873 sc->sc_tx_prodidx = idx; 1874 1875 return 0; 1876 } 1877 1878 /* 1879 * device initialize 1880 */ 1881 static int 1882 enet_init_plls(struct enet_softc *sc) 1883 { 1884 #if NIMXCCM > 0 1885 /* PLL power up */ 1886 if (imx6_pll_power(CCM_ANALOG_PLL_ENET, 1) != 0) { 1887 aprint_error_dev(sc->sc_dev, 1888 "couldn't enable CCM_ANALOG_PLL_ENET\n"); 1889 return -1; 1890 } 1891 #endif 1892 1893 return 0; 1894 } 1895 1896 static int 1897 enet_init_regs(struct enet_softc *sc, int init) 1898 { 1899 struct mii_data *mii; 1900 struct ifmedia_entry *ife; 1901 paddr_t paddr; 1902 uint32_t val; 1903 int fulldup, ecr_speed, rcr_speed, flowctrl; 1904 1905 if (init) { 1906 fulldup = 1; 1907 ecr_speed 
= ENET_ECR_SPEED; 1908 rcr_speed = 0; 1909 flowctrl = 0; 1910 } else { 1911 mii = &sc->sc_mii; 1912 ife = mii->mii_media.ifm_cur; 1913 1914 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) 1915 fulldup = 1; 1916 else 1917 fulldup = 0; 1918 1919 switch (IFM_SUBTYPE(ife->ifm_media)) { 1920 case IFM_10_T: 1921 ecr_speed = 0; 1922 rcr_speed = ENET_RCR_RMII_10T; 1923 break; 1924 case IFM_100_TX: 1925 ecr_speed = 0; 1926 rcr_speed = 0; 1927 break; 1928 default: 1929 ecr_speed = ENET_ECR_SPEED; 1930 rcr_speed = 0; 1931 break; 1932 } 1933 1934 flowctrl = sc->sc_flowflags & IFM_FLOW; 1935 } 1936 1937 /* reset */ 1938 ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET); 1939 1940 /* mask and clear all interrupt */ 1941 ENET_REG_WRITE(sc, ENET_EIMR, 0); 1942 ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff); 1943 1944 /* full duplex */ 1945 ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0); 1946 1947 /* clear and enable MIB register */ 1948 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 1949 ENET_REG_WRITE(sc, ENET_MIBC, 0); 1950 1951 /* MII speed setup. MDCclk(=2.5MHz) = PLL6clk/((val+1)*2) */ 1952 val = (imx6_get_clock(IMX6CLK_PLL6) / 500000 - 1) / 10; 1953 ENET_REG_WRITE(sc, ENET_MSCR, val); 1954 1955 /* Opcode/Pause Duration */ 1956 ENET_REG_WRITE(sc, ENET_OPD, 0x00010020); 1957 1958 /* Receive FIFO */ 1959 ENET_REG_WRITE(sc, ENET_RSFL, 16); /* RxFIFO Section Full */ 1960 ENET_REG_WRITE(sc, ENET_RSEM, 0x84); /* RxFIFO Section Empty */ 1961 ENET_REG_WRITE(sc, ENET_RAEM, 8); /* RxFIFO Almost Empty */ 1962 ENET_REG_WRITE(sc, ENET_RAFL, 8); /* RxFIFO Almost Full */ 1963 1964 /* Transmit FIFO */ 1965 ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD | 1966 ENET_TFWR_FIFO(128)); /* TxFIFO Watermark */ 1967 ENET_REG_WRITE(sc, ENET_TSEM, 0); /* TxFIFO Section Empty */ 1968 ENET_REG_WRITE(sc, ENET_TAEM, 256); /* TxFIFO Almost Empty */ 1969 ENET_REG_WRITE(sc, ENET_TAFL, 8); /* TxFIFO Almost Full */ 1970 ENET_REG_WRITE(sc, ENET_TIPG, 12); /* Tx Inter-Packet Gap */ 1971 1972 /* hardware checksum is default off (override in TX descripter) */ 1973 ENET_REG_WRITE(sc, ENET_TACC, 0); 1974 1975 /* 1976 * align ethernet payload on 32bit, discard frames with MAC layer error, 1977 * and don't discard checksum error 1978 */ 1979 ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS); 1980 1981 /* maximum frame size */ 1982 val = ENET_DEFAULT_PKT_LEN; 1983 ENET_REG_WRITE(sc, ENET_FTRL, val); /* Frame Truncation Length */ 1984 ENET_REG_WRITE(sc, ENET_RCR, 1985 ENET_RCR_PADEN | /* RX frame padding remove */ 1986 ENET_RCR_RGMII_EN | /* use RGMII */ 1987 (flowctrl ? ENET_RCR_FCE : 0) | /* flow control enable */ 1988 rcr_speed | 1989 (fulldup ? 
0 : ENET_RCR_DRT) | 1990 ENET_RCR_MAX_FL(val)); 1991 1992 /* Maximum Receive BufSize per one descriptor */ 1993 ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE); 1994 1995 1996 /* TX/RX Descriptor Physical Address */ 1997 paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr; 1998 ENET_REG_WRITE(sc, ENET_TDSR, paddr); 1999 paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr; 2000 ENET_REG_WRITE(sc, ENET_RDSR, paddr); 2001 /* sync cache */ 2002 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0, 2003 sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2004 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0, 2005 sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2006 2007 /* enable interrupts */ 2008 ENET_REG_WRITE(sc, ENET_EIMR, 2009 ENET_EIR_TXF | 2010 ENET_EIR_RXF | 2011 ENET_EIR_EBERR | 2012 0); 2013 2014 /* enable ether */ 2015 ENET_REG_WRITE(sc, ENET_ECR, 2016 #if _BYTE_ORDER == _LITTLE_ENDIAN 2017 ENET_ECR_DBSWP | 2018 #endif 2019 ENET_ECR_SPEED | /* default 1000Mbps mode */ 2020 ENET_ECR_EN1588 | /* use enhanced TX/RX descriptor */ 2021 ENET_ECR_ETHEREN); /* Ethernet Enable */ 2022 2023 return 0; 2024 } 2025 2026 static int 2027 enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp, 2028 bus_dmamap_t *mapp) 2029 { 2030 bus_dma_segment_t seglist[1]; 2031 int nsegs, error; 2032 2033 if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist, 2034 1, &nsegs, M_NOWAIT)) != 0) { 2035 device_printf(sc->sc_dev, 2036 "unable to allocate DMA buffer, error=%d\n", error); 2037 goto fail_alloc; 2038 } 2039 2040 if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp, 2041 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 2042 device_printf(sc->sc_dev, 2043 "unable to map DMA buffer, error=%d\n", 2044 error); 2045 goto fail_map; 2046 } 2047 2048 if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 2049 BUS_DMA_NOWAIT, mapp)) != 0) { 2050 device_printf(sc->sc_dev, 2051 "unable to create DMA map, error=%d\n", error); 2052 goto fail_create; 2053 } 2054 2055 if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL, 2056 BUS_DMA_NOWAIT)) != 0) { 2057 aprint_error_dev(sc->sc_dev, 2058 "unable to load DMA map, error=%d\n", error); 2059 goto fail_load; 2060 } 2061 2062 return 0; 2063 2064 fail_load: 2065 bus_dmamap_destroy(sc->sc_dmat, *mapp); 2066 fail_create: 2067 bus_dmamem_unmap(sc->sc_dmat, *addrp, size); 2068 fail_map: 2069 bus_dmamem_free(sc->sc_dmat, seglist, 1); 2070 fail_alloc: 2071 return error; 2072 } 2073
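/*
 * Note on the MII management clock programmed in enet_init_regs() above:
 * ENET_MSCR is written with val = (PLL6clk / 500000 - 1) / 10, aiming at
 * MDCclk = PLL6clk / ((val + 1) * 2) = 2.5 MHz.  Purely as an illustration
 * (the real rate comes from imx6_get_clock(IMX6CLK_PLL6)), a 500 MHz PLL6
 * gives val = (1000 - 1) / 10 = 99 and
 * MDC = 500 MHz / ((99 + 1) * 2) = 2.5 MHz.
 */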