1 /* $NetBSD: if_enet.c,v 1.3 2015/04/13 21:18:41 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * i.MX6 10/100/1000-Mbps ethernet MAC (ENET) 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.3 2015/04/13 21:18:41 riastradh Exp $"); 35 36 #include "imxocotp.h" 37 #include "imxccm.h" 38 #include "vlan.h" 39 40 #include <sys/param.h> 41 #include <sys/bus.h> 42 #include <sys/mbuf.h> 43 #include <sys/device.h> 44 #include <sys/sockio.h> 45 #include <sys/kernel.h> 46 #include <sys/rndsource.h> 47 48 #include <lib/libkern/libkern.h> 49 50 #include <net/if.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/if_ether.h> 54 #include <net/bpf.h> 55 #include <net/if_vlanvar.h> 56 57 #include <netinet/in.h> 58 #include <netinet/in_systm.h> 59 #include <netinet/ip.h> 60 61 #include <dev/mii/mii.h> 62 #include <dev/mii/miivar.h> 63 64 #include <arm/imx/imx6var.h> 65 #include <arm/imx/imx6_reg.h> 66 #include <arm/imx/imx6_ocotpreg.h> 67 #include <arm/imx/imx6_ocotpvar.h> 68 #include <arm/imx/imx6_ccmreg.h> 69 #include <arm/imx/imx6_ccmvar.h> 70 #include <arm/imx/if_enetreg.h> 71 #include "locators.h" 72 73 #undef DEBUG_ENET 74 #undef ENET_EVENT_COUNTER 75 76 #ifdef DEBUG_ENET 77 int enet_debug = 0; 78 # define DEVICE_DPRINTF(args...) \ 79 do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0) 80 #else 81 # define DEVICE_DPRINTF(args...) 82 #endif 83 84 85 #define RXDESC_MAXBUFSIZE 0x07f0 86 /* iMX6 ENET not work greather than 0x0800... 
*/ 87 88 #undef ENET_SUPPORT_JUMBO /* JUMBO FRAME SUPPORT is unstable */ 89 #ifdef ENET_SUPPORT_JUMBO 90 # define ENET_MAX_PKT_LEN 4034 /* MAX FIFO LEN */ 91 #else 92 # define ENET_MAX_PKT_LEN 1522 93 #endif 94 #define ENET_DEFAULT_PKT_LEN 1522 /* including VLAN tag */ 95 #define MTU2FRAMESIZE(n) \ 96 ((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) 97 98 99 #define ENET_MAX_PKT_NSEGS 64 100 #define ENET_TX_RING_CNT 256 /* must be 2^n */ 101 #define ENET_RX_RING_CNT 256 /* must be 2^n */ 102 103 #define ENET_TX_NEXTIDX(idx) (((idx) + 1) & (ENET_TX_RING_CNT - 1)) 104 #define ENET_RX_NEXTIDX(idx) (((idx) + 1) & (ENET_RX_RING_CNT - 1)) 105 106 struct enet_txsoft { 107 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 108 bus_dmamap_t txs_dmamap; /* our DMA map */ 109 }; 110 111 struct enet_rxsoft { 112 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 113 bus_dmamap_t rxs_dmamap; /* our DMA map */ 114 }; 115 116 struct enet_softc { 117 device_t sc_dev; 118 119 bus_addr_t sc_addr; 120 bus_space_tag_t sc_iot; 121 bus_space_handle_t sc_ioh; 122 bus_dma_tag_t sc_dmat; 123 124 /* interrupts */ 125 void *sc_ih; 126 callout_t sc_tick_ch; 127 bool sc_stopping; 128 129 /* TX */ 130 struct enet_txdesc *sc_txdesc_ring; /* [ENET_TX_RING_CNT] */ 131 bus_dmamap_t sc_txdesc_dmamap; 132 struct enet_rxdesc *sc_rxdesc_ring; /* [ENET_RX_RING_CNT] */ 133 bus_dmamap_t sc_rxdesc_dmamap; 134 struct enet_txsoft sc_txsoft[ENET_TX_RING_CNT]; 135 int sc_tx_considx; 136 int sc_tx_prodidx; 137 int sc_tx_free; 138 139 /* RX */ 140 struct enet_rxsoft sc_rxsoft[ENET_RX_RING_CNT]; 141 int sc_rx_readidx; 142 143 /* misc */ 144 int sc_if_flags; /* local copy of if_flags */ 145 int sc_flowflags; /* 802.3x flow control flags */ 146 struct ethercom sc_ethercom; /* interface info */ 147 struct mii_data sc_mii; 148 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 149 krndsource_t sc_rnd_source; 150 151 #ifdef ENET_EVENT_COUNTER 152 struct evcnt sc_ev_t_drop; 153 struct evcnt sc_ev_t_packets; 154 struct evcnt sc_ev_t_bc_pkt; 155 struct evcnt sc_ev_t_mc_pkt; 156 struct evcnt sc_ev_t_crc_align; 157 struct evcnt sc_ev_t_undersize; 158 struct evcnt sc_ev_t_oversize; 159 struct evcnt sc_ev_t_frag; 160 struct evcnt sc_ev_t_jab; 161 struct evcnt sc_ev_t_col; 162 struct evcnt sc_ev_t_p64; 163 struct evcnt sc_ev_t_p65to127n; 164 struct evcnt sc_ev_t_p128to255n; 165 struct evcnt sc_ev_t_p256to511; 166 struct evcnt sc_ev_t_p512to1023; 167 struct evcnt sc_ev_t_p1024to2047; 168 struct evcnt sc_ev_t_p_gte2048; 169 struct evcnt sc_ev_t_octets; 170 struct evcnt sc_ev_r_packets; 171 struct evcnt sc_ev_r_bc_pkt; 172 struct evcnt sc_ev_r_mc_pkt; 173 struct evcnt sc_ev_r_crc_align; 174 struct evcnt sc_ev_r_undersize; 175 struct evcnt sc_ev_r_oversize; 176 struct evcnt sc_ev_r_frag; 177 struct evcnt sc_ev_r_jab; 178 struct evcnt sc_ev_r_p64; 179 struct evcnt sc_ev_r_p65to127; 180 struct evcnt sc_ev_r_p128to255; 181 struct evcnt sc_ev_r_p256to511; 182 struct evcnt sc_ev_r_p512to1023; 183 struct evcnt sc_ev_r_p1024to2047; 184 struct evcnt sc_ev_r_p_gte2048; 185 struct evcnt sc_ev_r_octets; 186 #endif /* ENET_EVENT_COUNTER */ 187 }; 188 189 #define TXDESC_WRITEOUT(idx) \ 190 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 191 sizeof(struct enet_txdesc) * (idx), \ 192 sizeof(struct enet_txdesc), \ 193 BUS_DMASYNC_PREWRITE) 194 195 #define TXDESC_READIN(idx) \ 196 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 197 sizeof(struct enet_txdesc) * (idx), \ 198 sizeof(struct enet_txdesc), \ 199 BUS_DMASYNC_PREREAD) 200 201 #define RXDESC_WRITEOUT(idx) 
\ 202 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 203 sizeof(struct enet_rxdesc) * (idx), \ 204 sizeof(struct enet_rxdesc), \ 205 BUS_DMASYNC_PREWRITE) 206 207 #define RXDESC_READIN(idx) \ 208 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 209 sizeof(struct enet_rxdesc) * (idx), \ 210 sizeof(struct enet_rxdesc), \ 211 BUS_DMASYNC_PREREAD) 212 213 #define ENET_REG_READ(sc, reg) \ 214 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 215 216 #define ENET_REG_WRITE(sc, reg, value) \ 217 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 218 219 static int enet_match(device_t, struct cfdata *, void *); 220 static void enet_attach(device_t, device_t, void *); 221 #ifdef ENET_EVENT_COUNTER 222 static void enet_attach_evcnt(struct enet_softc *); 223 static void enet_update_evcnt(struct enet_softc *); 224 #endif 225 226 static int enet_intr(void *); 227 static void enet_tick(void *); 228 static int enet_tx_intr(void *); 229 static int enet_rx_intr(void *); 230 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 231 int); 232 233 static void enet_start(struct ifnet *); 234 static int enet_ifflags_cb(struct ethercom *); 235 static int enet_ioctl(struct ifnet *, u_long, void *); 236 static int enet_init(struct ifnet *); 237 static void enet_stop(struct ifnet *, int); 238 static void enet_watchdog(struct ifnet *); 239 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 240 241 static int enet_miibus_readreg(device_t, int, int); 242 static void enet_miibus_writereg(device_t, int, int, int); 243 static void enet_miibus_statchg(struct ifnet *); 244 245 static void enet_ocotp_getmacaddr(uint8_t *); 246 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 247 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 248 static void enet_setmulti(struct enet_softc *); 249 static int enet_encap_mbufalign(struct mbuf **); 250 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 251 static int enet_init_plls(struct enet_softc *); 252 static int enet_init_regs(struct enet_softc *, int); 253 static int enet_alloc_ring(struct enet_softc *); 254 static void enet_init_txring(struct enet_softc *); 255 static int enet_init_rxring(struct enet_softc *); 256 static void enet_reset_rxdesc(struct enet_softc *, int); 257 static int enet_alloc_rxbuf(struct enet_softc *, int); 258 static void enet_drain_txbuf(struct enet_softc *); 259 static void enet_drain_rxbuf(struct enet_softc *); 260 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 261 bus_dmamap_t *); 262 263 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 264 enet_match, enet_attach, NULL, NULL); 265 266 /* ARGSUSED */ 267 static int 268 enet_match(device_t parent __unused, struct cfdata *match __unused, void *aux) 269 { 270 struct axi_attach_args *aa; 271 272 aa = aux; 273 274 switch (aa->aa_addr) { 275 case (IMX6_AIPS2_BASE + AIPS2_ENET_BASE): 276 return 1; 277 } 278 279 return 0; 280 } 281 282 /* ARGSUSED */ 283 static void 284 enet_attach(device_t parent __unused, device_t self, void *aux) 285 { 286 struct enet_softc *sc; 287 struct axi_attach_args *aa; 288 struct ifnet *ifp; 289 290 aa = aux; 291 sc = device_private(self); 292 sc->sc_dev = self; 293 sc->sc_iot = aa->aa_iot; 294 sc->sc_addr = aa->aa_addr; 295 sc->sc_dmat = aa->aa_dmat; 296 297 if (aa->aa_size == AXICF_SIZE_DEFAULT) 298 aa->aa_size = AIPS2_ENET_SIZE; 299 300 aprint_naive("\n"); 301 aprint_normal(": Gigabit Ethernet Controller\n"); 302 if (bus_space_map(sc->sc_iot, sc->sc_addr, aa->aa_size, 0, 303 
&sc->sc_ioh)) { 304 aprint_error_dev(self, "cannot map registers\n"); 305 return; 306 } 307 308 /* allocate dma buffer */ 309 if (enet_alloc_ring(sc)) 310 return; 311 312 #define IS_ENADDR_ZERO(enaddr) \ 313 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 314 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 315 316 /* get mac-address from SoC eFuse */ 317 enet_ocotp_getmacaddr(sc->sc_enaddr); 318 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 319 /* by any chance, mac-address is already set by bootloader? */ 320 enet_gethwaddr(sc, sc->sc_enaddr); 321 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 322 /* give up. set randomly */ 323 uint32_t addr = random(); 324 /* not multicast */ 325 sc->sc_enaddr[0] = (addr >> 24) & 0xfc; 326 sc->sc_enaddr[1] = addr >> 16; 327 sc->sc_enaddr[2] = addr >> 8; 328 sc->sc_enaddr[3] = addr; 329 addr = random(); 330 sc->sc_enaddr[4] = addr >> 8; 331 sc->sc_enaddr[5] = addr; 332 333 aprint_error_dev(self, 334 "cannot get mac address. set randomly\n"); 335 } 336 } 337 enet_sethwaddr(sc, sc->sc_enaddr); 338 339 aprint_normal_dev(self, "Ethernet address %s\n", 340 ether_sprintf(sc->sc_enaddr)); 341 342 /* power up and init */ 343 if (enet_init_plls(sc) != 0) 344 goto failure; 345 enet_init_regs(sc, 1); 346 347 /* setup interrupt handlers */ 348 if ((sc->sc_ih = intr_establish(aa->aa_irq, IPL_NET, 349 IST_LEVEL, enet_intr, sc)) == NULL) { 350 aprint_error_dev(self, "unable to establish interrupt\n"); 351 goto failure; 352 } 353 354 /* callout will be scheduled from enet_init() */ 355 callout_init(&sc->sc_tick_ch, 0); 356 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 357 358 /* setup ifp */ 359 ifp = &sc->sc_ethercom.ec_if; 360 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 361 ifp->if_softc = sc; 362 ifp->if_mtu = ETHERMTU; 363 ifp->if_baudrate = IF_Gbps(1); 364 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 365 ifp->if_ioctl = enet_ioctl; 366 ifp->if_start = enet_start; 367 ifp->if_init = enet_init; 368 ifp->if_stop = enet_stop; 369 ifp->if_watchdog = enet_watchdog; 370 371 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 372 #ifdef ENET_SUPPORT_JUMBO 373 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 374 #endif 375 376 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 377 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 378 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 379 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 380 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 381 382 IFQ_SET_MAXLEN(&ifp->if_snd, max(ENET_TX_RING_CNT, IFQ_MAXLEN)); 383 IFQ_SET_READY(&ifp->if_snd); 384 385 /* setup MII */ 386 sc->sc_ethercom.ec_mii = &sc->sc_mii; 387 sc->sc_mii.mii_ifp = ifp; 388 sc->sc_mii.mii_readreg = enet_miibus_readreg; 389 sc->sc_mii.mii_writereg = enet_miibus_writereg; 390 sc->sc_mii.mii_statchg = enet_miibus_statchg; 391 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 392 enet_mediastatus); 393 394 /* try to attach PHY */ 395 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 396 MII_OFFSET_ANY, 0); 397 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 398 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 399 0, NULL); 400 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 401 } else { 402 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 403 } 404 405 if_attach(ifp); 406 ether_ifattach(ifp, sc->sc_enaddr); 407 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 408 409 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 410 RND_TYPE_NET, RND_FLAG_DEFAULT); 411 412 #ifdef ENET_EVENT_COUNTER 413 
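	/*
	 * Optional MIB/RMON event counters (built only when
	 * ENET_EVENT_COUNTER is defined): attached here and refreshed
	 * periodically from enet_tick() via enet_update_evcnt().
	 */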
enet_attach_evcnt(sc); 414 #endif 415 416 sc->sc_stopping = false; 417 418 return; 419 420 failure: 421 bus_space_unmap(sc->sc_iot, sc->sc_ioh, aa->aa_size); 422 return; 423 } 424 425 #ifdef ENET_EVENT_COUNTER 426 static void 427 enet_attach_evcnt(struct enet_softc *sc) 428 { 429 const char *xname; 430 431 xname = device_xname(sc->sc_dev); 432 433 #define ENET_EVCNT_ATTACH(name) \ 434 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 435 NULL, xname, #name); 436 437 ENET_EVCNT_ATTACH(t_drop); 438 ENET_EVCNT_ATTACH(t_packets); 439 ENET_EVCNT_ATTACH(t_bc_pkt); 440 ENET_EVCNT_ATTACH(t_mc_pkt); 441 ENET_EVCNT_ATTACH(t_crc_align); 442 ENET_EVCNT_ATTACH(t_undersize); 443 ENET_EVCNT_ATTACH(t_oversize); 444 ENET_EVCNT_ATTACH(t_frag); 445 ENET_EVCNT_ATTACH(t_jab); 446 ENET_EVCNT_ATTACH(t_col); 447 ENET_EVCNT_ATTACH(t_p64); 448 ENET_EVCNT_ATTACH(t_p65to127n); 449 ENET_EVCNT_ATTACH(t_p128to255n); 450 ENET_EVCNT_ATTACH(t_p256to511); 451 ENET_EVCNT_ATTACH(t_p512to1023); 452 ENET_EVCNT_ATTACH(t_p1024to2047); 453 ENET_EVCNT_ATTACH(t_p_gte2048); 454 ENET_EVCNT_ATTACH(t_octets); 455 ENET_EVCNT_ATTACH(r_packets); 456 ENET_EVCNT_ATTACH(r_bc_pkt); 457 ENET_EVCNT_ATTACH(r_mc_pkt); 458 ENET_EVCNT_ATTACH(r_crc_align); 459 ENET_EVCNT_ATTACH(r_undersize); 460 ENET_EVCNT_ATTACH(r_oversize); 461 ENET_EVCNT_ATTACH(r_frag); 462 ENET_EVCNT_ATTACH(r_jab); 463 ENET_EVCNT_ATTACH(r_p64); 464 ENET_EVCNT_ATTACH(r_p65to127); 465 ENET_EVCNT_ATTACH(r_p128to255); 466 ENET_EVCNT_ATTACH(r_p256to511); 467 ENET_EVCNT_ATTACH(r_p512to1023); 468 ENET_EVCNT_ATTACH(r_p1024to2047); 469 ENET_EVCNT_ATTACH(r_p_gte2048); 470 ENET_EVCNT_ATTACH(r_octets); 471 } 472 473 static void 474 enet_update_evcnt(struct enet_softc *sc) 475 { 476 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 477 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 478 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 479 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 480 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 481 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 482 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 483 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 484 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 485 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 486 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 487 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 488 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 489 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 490 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 491 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 492 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 493 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 494 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 495 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 496 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 497 sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 498 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 499 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 500 
sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 501 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 502 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 503 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 504 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 505 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 506 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 507 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 508 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 509 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 510 } 511 #endif /* ENET_EVENT_COUNTER */ 512 513 static void 514 enet_tick(void *arg) 515 { 516 struct enet_softc *sc; 517 struct mii_data *mii; 518 struct ifnet *ifp; 519 int s; 520 521 sc = arg; 522 mii = &sc->sc_mii; 523 ifp = &sc->sc_ethercom.ec_if; 524 525 s = splnet(); 526 527 if (sc->sc_stopping) 528 goto out; 529 530 531 #ifdef ENET_EVENT_COUNTER 532 enet_update_evcnt(sc); 533 #endif 534 535 /* update counters */ 536 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 537 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 538 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 539 540 /* clear counters */ 541 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 542 ENET_REG_WRITE(sc, ENET_MIBC, 0); 543 544 mii_tick(mii); 545 out: 546 547 if (!sc->sc_stopping) 548 callout_schedule(&sc->sc_tick_ch, hz); 549 550 splx(s); 551 } 552 553 static int 554 enet_intr(void *arg) 555 { 556 struct enet_softc *sc; 557 struct ifnet *ifp; 558 uint32_t status; 559 560 sc = arg; 561 status = ENET_REG_READ(sc, ENET_EIR); 562 563 if (status & ENET_EIR_TXF) 564 enet_tx_intr(arg); 565 566 if (status & ENET_EIR_RXF) 567 enet_rx_intr(arg); 568 569 if (status & ENET_EIR_EBERR) { 570 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 571 ifp = &sc->sc_ethercom.ec_if; 572 enet_stop(ifp, 1); 573 enet_init(ifp); 574 } else { 575 ENET_REG_WRITE(sc, ENET_EIR, status); 576 } 577 578 rnd_add_uint32(&sc->sc_rnd_source, status); 579 580 return 1; 581 } 582 583 static int 584 enet_tx_intr(void *arg) 585 { 586 struct enet_softc *sc; 587 struct ifnet *ifp; 588 struct enet_txsoft *txs; 589 int idx; 590 591 sc = (struct enet_softc *)arg; 592 ifp = &sc->sc_ethercom.ec_if; 593 594 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 595 idx = ENET_TX_NEXTIDX(idx)) { 596 597 txs = &sc->sc_txsoft[idx]; 598 599 TXDESC_READIN(idx); 600 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 601 /* This TX Descriptor has not been transmitted yet */ 602 break; 603 } 604 605 /* txsoft is available on first segment (TXFLAGS1_T1) */ 606 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 607 bus_dmamap_unload(sc->sc_dmat, 608 txs->txs_dmamap); 609 m_freem(txs->txs_mbuf); 610 ifp->if_opackets++; 611 } 612 613 /* checking error */ 614 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 615 uint32_t flags2; 616 617 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 618 619 if (flags2 & (TXFLAGS2_TXE | 620 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 621 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 622 #ifdef DEBUG_ENET 623 if (enet_debug) { 624 char flagsbuf[128]; 625 626 snprintb(flagsbuf, sizeof(flagsbuf), 627 "\20" "\20TRANSMIT" "\16UNDERFLOW" 628 "\15COLLISION" "\14FRAME" 629 "\13LATECOLLISION" "\12OVERFLOW", 630 flags2); 631 632 device_printf(sc->sc_dev, 633 
"txdesc[%d]: transmit error: " 634 "flags2=%s\n", idx, flagsbuf); 635 } 636 #endif /* DEBUG_ENET */ 637 ifp->if_oerrors++; 638 } 639 } 640 641 sc->sc_tx_free++; 642 } 643 sc->sc_tx_considx = idx; 644 645 if (sc->sc_tx_free > 0) 646 ifp->if_flags &= ~IFF_OACTIVE; 647 648 /* 649 * No more pending TX descriptor, 650 * cancel the watchdog timer. 651 */ 652 if (sc->sc_tx_free == ENET_TX_RING_CNT) 653 ifp->if_timer = 0; 654 655 return 1; 656 } 657 658 static int 659 enet_rx_intr(void *arg) 660 { 661 struct enet_softc *sc; 662 struct ifnet *ifp; 663 struct enet_rxsoft *rxs; 664 int idx, len, amount; 665 uint32_t flags1, flags2; 666 struct mbuf *m, *m0, *mprev; 667 668 sc = arg; 669 ifp = &sc->sc_ethercom.ec_if; 670 671 m0 = mprev = NULL; 672 amount = 0; 673 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 674 675 rxs = &sc->sc_rxsoft[idx]; 676 677 RXDESC_READIN(idx); 678 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 679 /* This RX Descriptor has not been received yet */ 680 break; 681 } 682 683 /* 684 * build mbuf from RX Descriptor if needed 685 */ 686 m = rxs->rxs_mbuf; 687 rxs->rxs_mbuf = NULL; 688 689 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 690 len = RXFLAGS1_LEN(flags1); 691 692 #define RACC_SHIFT16 2 693 if (m0 == NULL) { 694 m0 = m; 695 m_adj(m0, RACC_SHIFT16); 696 len -= RACC_SHIFT16; 697 m->m_len = len; 698 amount = len; 699 } else { 700 if (flags1 & RXFLAGS1_L) 701 len = len - amount - RACC_SHIFT16; 702 703 m->m_len = len; 704 amount += len; 705 m->m_flags &= ~M_PKTHDR; 706 mprev->m_next = m; 707 } 708 mprev = m; 709 710 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 711 712 if (flags1 & RXFLAGS1_L) { 713 /* last buffer */ 714 if ((amount < ETHER_HDR_LEN) || 715 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 716 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 717 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 718 RXFLAGS2_CE)))) { 719 720 #ifdef DEBUG_ENET 721 if (enet_debug) { 722 char flags1buf[128], flags2buf[128]; 723 snprintb(flags1buf, sizeof(flags1buf), 724 "\20" "\31MISS" "\26LENGTHVIOLATION" 725 "\25NONOCTET" "\23CRC" "\22OVERRUN" 726 "\21TRUNCATED", flags1); 727 snprintb(flags2buf, sizeof(flags2buf), 728 "\20" "\40MAC" "\33PHY" 729 "\32COLLISION", flags2); 730 731 DEVICE_DPRINTF( 732 "rxdesc[%d]: receive error: " 733 "flags1=%s,flags2=%s,len=%d\n", 734 idx, flags1buf, flags2buf, amount); 735 } 736 #endif /* DEBUG_ENET */ 737 ifp->if_ierrors++; 738 m_freem(m0); 739 740 } else { 741 /* packet receive ok */ 742 ifp->if_ipackets++; 743 m0->m_pkthdr.rcvif = ifp; 744 m0->m_pkthdr.len = amount; 745 746 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 747 rxs->rxs_dmamap->dm_mapsize, 748 BUS_DMASYNC_PREREAD); 749 750 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 751 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 752 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 753 enet_rx_csum(sc, ifp, m0, idx); 754 755 /* Pass this up to any BPF listeners */ 756 bpf_mtap(ifp, m0); 757 758 (*ifp->if_input)(ifp, m0); 759 } 760 761 m0 = NULL; 762 mprev = NULL; 763 amount = 0; 764 765 } else { 766 /* continued from previous buffer */ 767 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 768 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 769 } 770 771 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 772 if (enet_alloc_rxbuf(sc, idx) != 0) { 773 panic("enet_alloc_rxbuf NULL\n"); 774 } 775 } 776 sc->sc_rx_readidx = idx; 777 778 /* re-enable RX DMA to make sure */ 779 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 780 781 return 1; 782 } 783 784 static void 785 enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct 
mbuf *m, int idx) 786 { 787 uint32_t flags2; 788 uint8_t proto; 789 790 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 791 792 if (flags2 & RXFLAGS2_IPV6) { 793 proto = sc->sc_rxdesc_ring[idx].rx_proto; 794 795 /* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */ 796 if ((proto == IPPROTO_TCP) && 797 (ifp->if_csum_flags_rx & M_CSUM_TCPv6)) 798 m->m_pkthdr.csum_flags |= M_CSUM_TCPv6; 799 else if ((proto == IPPROTO_UDP) && 800 (ifp->if_csum_flags_rx & M_CSUM_UDPv6)) 801 m->m_pkthdr.csum_flags |= M_CSUM_UDPv6; 802 else 803 return; 804 805 /* IPv6 protocol checksum error */ 806 if (flags2 & RXFLAGS2_PCR) 807 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 808 809 } else { 810 struct ether_header *eh; 811 uint8_t *ip; 812 813 eh = mtod(m, struct ether_header *); 814 815 /* XXX: is an IPv4? */ 816 if (ntohs(eh->ether_type) != ETHERTYPE_IP) 817 return; 818 ip = (uint8_t *)(eh + 1); 819 if ((ip[0] & 0xf0) == 0x40) 820 return; 821 822 proto = sc->sc_rxdesc_ring[idx].rx_proto; 823 if (flags2 & RXFLAGS2_ICE) { 824 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 825 m->m_pkthdr.csum_flags |= 826 M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 827 } 828 } else { 829 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 830 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 831 } 832 833 /* 834 * PCR is valid when 835 * ICE == 0 and FRAG == 0 836 */ 837 if (flags2 & RXFLAGS2_FRAG) 838 return; 839 840 /* 841 * PCR is valid when proto is TCP or UDP 842 */ 843 if ((proto == IPPROTO_TCP) && 844 (ifp->if_csum_flags_rx & M_CSUM_TCPv4)) 845 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 846 else if ((proto == IPPROTO_UDP) && 847 (ifp->if_csum_flags_rx & M_CSUM_UDPv4)) 848 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 849 else 850 return; 851 852 /* IPv4 protocol cksum error */ 853 if (flags2 & RXFLAGS2_PCR) 854 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 855 } 856 } 857 } 858 859 static void 860 enet_setmulti(struct enet_softc *sc) 861 { 862 struct ifnet *ifp; 863 struct ether_multi *enm; 864 struct ether_multistep step; 865 int promisc; 866 uint32_t crc; 867 uint32_t gaddr[2]; 868 869 ifp = &sc->sc_ethercom.ec_if; 870 871 promisc = 0; 872 if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) { 873 ifp->if_flags |= IFF_ALLMULTI; 874 if (ifp->if_flags & IFF_PROMISC) 875 promisc = 1; 876 gaddr[0] = gaddr[1] = 0xffffffff; 877 } else { 878 gaddr[0] = gaddr[1] = 0; 879 880 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm); 881 while (enm != NULL) { 882 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 883 gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 884 ETHER_NEXT_MULTI(step, enm); 885 } 886 } 887 888 ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]); 889 ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]); 890 891 if (promisc) { 892 /* match all packet */ 893 ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff); 894 ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff); 895 } else { 896 /* don't match any packet */ 897 ENET_REG_WRITE(sc, ENET_IAUR, 0); 898 ENET_REG_WRITE(sc, ENET_IALR, 0); 899 } 900 } 901 902 static void 903 enet_ocotp_getmacaddr(uint8_t *macaddr) 904 { 905 #if NIMXOCOTP > 0 906 uint32_t addr; 907 908 addr = imxocotp_read(OCOTP_MAC1); 909 macaddr[0] = addr >> 8; 910 macaddr[1] = addr; 911 912 addr = imxocotp_read(OCOTP_MAC0); 913 macaddr[2] = addr >> 24; 914 macaddr[3] = addr >> 16; 915 macaddr[4] = addr >> 8; 916 macaddr[5] = addr; 917 #endif 918 } 919 920 static void 921 enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 922 { 923 uint32_t paddr; 924 925 paddr = ENET_REG_READ(sc, ENET_PALR); 926 hwaddr[0] = paddr >> 24; 927 hwaddr[1] = paddr >> 16; 928 hwaddr[2] = paddr >> 8; 
929 hwaddr[3] = paddr; 930 931 paddr = ENET_REG_READ(sc, ENET_PAUR); 932 hwaddr[4] = paddr >> 24; 933 hwaddr[5] = paddr >> 16; 934 } 935 936 static void 937 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 938 { 939 uint32_t paddr; 940 941 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 942 hwaddr[3]; 943 ENET_REG_WRITE(sc, ENET_PALR, paddr); 944 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 945 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 946 } 947 948 /* 949 * ifnet interfaces 950 */ 951 static int 952 enet_init(struct ifnet *ifp) 953 { 954 struct enet_softc *sc; 955 int s, error; 956 957 sc = ifp->if_softc; 958 959 s = splnet(); 960 961 enet_init_regs(sc, 0); 962 enet_init_txring(sc); 963 error = enet_init_rxring(sc); 964 if (error != 0) { 965 enet_drain_rxbuf(sc); 966 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 967 goto init_failure; 968 } 969 970 /* reload mac address */ 971 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 972 enet_sethwaddr(sc, sc->sc_enaddr); 973 974 /* program multicast address */ 975 enet_setmulti(sc); 976 977 /* update if_flags */ 978 ifp->if_flags |= IFF_RUNNING; 979 ifp->if_flags &= ~IFF_OACTIVE; 980 981 /* update local copy of if_flags */ 982 sc->sc_if_flags = ifp->if_flags; 983 984 /* mii */ 985 mii_mediachg(&sc->sc_mii); 986 987 /* enable RX DMA */ 988 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 989 990 sc->sc_stopping = false; 991 callout_schedule(&sc->sc_tick_ch, hz); 992 993 init_failure: 994 splx(s); 995 996 return error; 997 } 998 999 static void 1000 enet_start(struct ifnet *ifp) 1001 { 1002 struct enet_softc *sc; 1003 struct mbuf *m; 1004 int npkt; 1005 1006 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1007 return; 1008 1009 sc = ifp->if_softc; 1010 for (npkt = 0; ; npkt++) { 1011 IFQ_POLL(&ifp->if_snd, m); 1012 if (m == NULL) 1013 break; 1014 1015 if (sc->sc_tx_free <= 0) { 1016 /* no tx descriptor now... */ 1017 ifp->if_flags |= IFF_OACTIVE; 1018 DEVICE_DPRINTF("TX descriptor is full\n"); 1019 break; 1020 } 1021 1022 IFQ_DEQUEUE(&ifp->if_snd, m); 1023 1024 if (enet_encap_txring(sc, &m) != 0) { 1025 /* too many mbuf chains? */ 1026 ifp->if_flags |= IFF_OACTIVE; 1027 DEVICE_DPRINTF( 1028 "TX descriptor is full. dropping packet\n"); 1029 m_freem(m); 1030 ifp->if_oerrors++; 1031 break; 1032 } 1033 1034 /* Pass the packet to any BPF listeners */ 1035 bpf_mtap(ifp, m); 1036 } 1037 1038 if (npkt) { 1039 /* enable TX DMA */ 1040 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 1041 1042 ifp->if_timer = 5; 1043 } 1044 } 1045 1046 static void 1047 enet_stop(struct ifnet *ifp, int disable) 1048 { 1049 struct enet_softc *sc; 1050 int s; 1051 uint32_t v; 1052 1053 sc = ifp->if_softc; 1054 1055 s = splnet(); 1056 1057 sc->sc_stopping = true; 1058 callout_stop(&sc->sc_tick_ch); 1059 1060 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 1061 v = ENET_REG_READ(sc, ENET_ECR); 1062 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 1063 1064 /* Mark the interface as down and cancel the watchdog timer. 
*/ 1065 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1066 ifp->if_timer = 0; 1067 1068 if (disable) { 1069 enet_drain_txbuf(sc); 1070 enet_drain_rxbuf(sc); 1071 } 1072 1073 splx(s); 1074 } 1075 1076 static void 1077 enet_watchdog(struct ifnet *ifp) 1078 { 1079 struct enet_softc *sc; 1080 int s; 1081 1082 sc = ifp->if_softc; 1083 s = splnet(); 1084 1085 device_printf(sc->sc_dev, "watchdog timeout\n"); 1086 ifp->if_oerrors++; 1087 1088 /* salvage packets left in descriptors */ 1089 enet_tx_intr(sc); 1090 enet_rx_intr(sc); 1091 1092 /* reset */ 1093 enet_stop(ifp, 1); 1094 enet_init(ifp); 1095 1096 splx(s); 1097 } 1098 1099 static void 1100 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1101 { 1102 struct enet_softc *sc = ifp->if_softc; 1103 1104 ether_mediastatus(ifp, ifmr); 1105 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 1106 | sc->sc_flowflags; 1107 } 1108 1109 static int 1110 enet_ifflags_cb(struct ethercom *ec) 1111 { 1112 struct ifnet *ifp = &ec->ec_if; 1113 struct enet_softc *sc = ifp->if_softc; 1114 int change = ifp->if_flags ^ sc->sc_if_flags; 1115 1116 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1117 return ENETRESET; 1118 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 1119 return 0; 1120 1121 enet_setmulti(sc); 1122 1123 sc->sc_if_flags = ifp->if_flags; 1124 return 0; 1125 } 1126 1127 static int 1128 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 1129 { 1130 struct enet_softc *sc; 1131 struct ifreq *ifr; 1132 int s, error; 1133 uint32_t v; 1134 1135 sc = ifp->if_softc; 1136 ifr = data; 1137 1138 error = 0; 1139 1140 s = splnet(); 1141 1142 switch (command) { 1143 case SIOCSIFMTU: 1144 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 1145 error = EINVAL; 1146 } else { 1147 ifp->if_mtu = ifr->ifr_mtu; 1148 1149 /* set maximum frame length */ 1150 v = MTU2FRAMESIZE(ifr->ifr_mtu); 1151 ENET_REG_WRITE(sc, ENET_FTRL, v); 1152 v = ENET_REG_READ(sc, ENET_RCR); 1153 v &= ~ENET_RCR_MAX_FL(0x3fff); 1154 v |= ENET_RCR_MAX_FL(ifp->if_mtu + 1155 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1156 ENET_REG_WRITE(sc, ENET_RCR, v); 1157 } 1158 break; 1159 case SIOCSIFMEDIA: 1160 case SIOCGIFMEDIA: 1161 /* Flow control requires full-duplex mode. */ 1162 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1163 (ifr->ifr_media & IFM_FDX) == 0) 1164 ifr->ifr_media &= ~IFM_ETH_FMASK; 1165 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1166 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1167 /* We can do both TXPAUSE and RXPAUSE. 
*/ 1168 ifr->ifr_media |= 1169 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1170 } 1171 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1172 } 1173 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1174 break; 1175 default: 1176 error = ether_ioctl(ifp, command, data); 1177 if (error != ENETRESET) 1178 break; 1179 1180 /* post-process */ 1181 error = 0; 1182 switch (command) { 1183 case SIOCSIFCAP: 1184 error = (*ifp->if_init)(ifp); 1185 break; 1186 case SIOCADDMULTI: 1187 case SIOCDELMULTI: 1188 if (ifp->if_flags & IFF_RUNNING) 1189 enet_setmulti(sc); 1190 break; 1191 } 1192 break; 1193 } 1194 1195 splx(s); 1196 1197 return error; 1198 } 1199 1200 /* 1201 * for MII 1202 */ 1203 static int 1204 enet_miibus_readreg(device_t dev, int phy, int reg) 1205 { 1206 struct enet_softc *sc; 1207 int timeout; 1208 uint32_t val, status; 1209 1210 sc = device_private(dev); 1211 1212 /* clear MII update */ 1213 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1214 1215 /* read command */ 1216 ENET_REG_WRITE(sc, ENET_MMFR, 1217 ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA | 1218 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy)); 1219 1220 /* check MII update */ 1221 for (timeout = 5000; timeout > 0; --timeout) { 1222 status = ENET_REG_READ(sc, ENET_EIR); 1223 if (status & ENET_EIR_MII) 1224 break; 1225 } 1226 if (timeout <= 0) { 1227 DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n", 1228 reg); 1229 val = -1; 1230 } else { 1231 val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK; 1232 } 1233 1234 return val; 1235 } 1236 1237 static void 1238 enet_miibus_writereg(device_t dev, int phy, int reg, int val) 1239 { 1240 struct enet_softc *sc; 1241 int timeout; 1242 1243 sc = device_private(dev); 1244 1245 /* clear MII update */ 1246 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1247 1248 /* write command */ 1249 ENET_REG_WRITE(sc, ENET_MMFR, 1250 ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA | 1251 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) | 1252 (ENET_MMFR_DATAMASK & val)); 1253 1254 /* check MII update */ 1255 for (timeout = 5000; timeout > 0; --timeout) { 1256 if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII) 1257 break; 1258 } 1259 if (timeout <= 0) { 1260 DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", 1261 reg); 1262 } 1263 } 1264 1265 static void 1266 enet_miibus_statchg(struct ifnet *ifp) 1267 { 1268 struct enet_softc *sc; 1269 struct mii_data *mii; 1270 struct ifmedia_entry *ife; 1271 uint32_t ecr, ecr0; 1272 uint32_t rcr, rcr0; 1273 uint32_t tcr, tcr0; 1274 1275 sc = ifp->if_softc; 1276 mii = &sc->sc_mii; 1277 ife = mii->mii_media.ifm_cur; 1278 1279 /* get current status */ 1280 ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET; 1281 rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR); 1282 tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR); 1283 1284 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1285 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 1286 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1287 mii->mii_media_active &= ~IFM_ETH_FMASK; 1288 } 1289 1290 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 1291 tcr |= ENET_TCR_FDEN; /* full duplex */ 1292 rcr &= ~ENET_RCR_DRT;; /* enable receive on transmit */ 1293 } else { 1294 tcr &= ~ENET_TCR_FDEN; /* half duplex */ 1295 rcr |= ENET_RCR_DRT; /* disable receive on transmit */ 1296 } 1297 1298 if ((tcr ^ tcr0) & ENET_TCR_FDEN) { 1299 /* 1300 * need to reset because 1301 * FDEN can change when ECR[ETHEREN] is 0 1302 */ 1303 enet_init_regs(sc, 0); 1304 return; 1305 } 1306 1307 switch 
(IFM_SUBTYPE(ife->ifm_media)) { 1308 case IFM_AUTO: 1309 case IFM_1000_T: 1310 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1311 break; 1312 case IFM_100_TX: 1313 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1314 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1315 break; 1316 case IFM_10_T: 1317 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1318 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1319 break; 1320 default: 1321 ecr = ecr0; 1322 rcr = rcr0; 1323 tcr = tcr0; 1324 break; 1325 } 1326 1327 if (sc->sc_flowflags & IFM_FLOW) 1328 rcr |= ENET_RCR_FCE; 1329 else 1330 rcr &= ~ENET_RCR_FCE; 1331 1332 /* update registers if need change */ 1333 if (ecr != ecr0) 1334 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1335 if (rcr != rcr0) 1336 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1337 if (tcr != tcr0) 1338 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1339 } 1340 1341 /* 1342 * handling descriptors 1343 */ 1344 static void 1345 enet_init_txring(struct enet_softc *sc) 1346 { 1347 int i; 1348 1349 /* build TX ring */ 1350 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1351 sc->sc_txdesc_ring[i].tx_flags1_len = 1352 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1353 sc->sc_txdesc_ring[i].tx_databuf = 0; 1354 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1355 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1356 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1357 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1358 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1359 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1360 1361 TXDESC_WRITEOUT(i); 1362 } 1363 1364 sc->sc_tx_free = ENET_TX_RING_CNT; 1365 sc->sc_tx_considx = 0; 1366 sc->sc_tx_prodidx = 0; 1367 } 1368 1369 static int 1370 enet_init_rxring(struct enet_softc *sc) 1371 { 1372 int i, error; 1373 1374 /* build RX ring */ 1375 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1376 error = enet_alloc_rxbuf(sc, i); 1377 if (error != 0) 1378 return error; 1379 } 1380 1381 sc->sc_rx_readidx = 0; 1382 1383 return 0; 1384 } 1385 1386 static int 1387 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1388 { 1389 struct mbuf *m; 1390 int error; 1391 1392 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1393 1394 /* free mbuf if already allocated */ 1395 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1396 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1397 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1398 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1399 } 1400 1401 /* allocate new mbuf cluster */ 1402 MGETHDR(m, M_DONTWAIT, MT_DATA); 1403 if (m == NULL) 1404 return ENOBUFS; 1405 MCLGET(m, M_DONTWAIT); 1406 if (!(m->m_flags & M_EXT)) { 1407 m_freem(m); 1408 return ENOBUFS; 1409 } 1410 m->m_len = MCLBYTES; 1411 m->m_next = NULL; 1412 1413 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1414 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1415 BUS_DMA_READ | BUS_DMA_NOWAIT); 1416 if (error) 1417 return error; 1418 1419 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1420 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1421 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1422 1423 sc->sc_rxsoft[idx].rxs_mbuf = m; 1424 enet_reset_rxdesc(sc, idx); 1425 return 0; 1426 } 1427 1428 static void 1429 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1430 { 1431 uint32_t paddr; 1432 1433 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1434 1435 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1436 RXFLAGS1_E | 1437 ((idx == (ENET_RX_RING_CNT - 1)) ? 
RXFLAGS1_W : 0); 1438 sc->sc_rxdesc_ring[idx].rx_databuf = paddr; 1439 sc->sc_rxdesc_ring[idx].rx_flags2 = 1440 RXFLAGS2_INT; 1441 sc->sc_rxdesc_ring[idx].rx_hl = 0; 1442 sc->sc_rxdesc_ring[idx].rx_proto = 0; 1443 sc->sc_rxdesc_ring[idx].rx_cksum = 0; 1444 sc->sc_rxdesc_ring[idx].rx_flags3 = 0; 1445 sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0; 1446 sc->sc_rxdesc_ring[idx].rx__reserved2 = 0; 1447 sc->sc_rxdesc_ring[idx].rx__reserved3 = 0; 1448 1449 RXDESC_WRITEOUT(idx); 1450 } 1451 1452 static void 1453 enet_drain_txbuf(struct enet_softc *sc) 1454 { 1455 int idx; 1456 struct enet_txsoft *txs; 1457 struct ifnet *ifp; 1458 1459 ifp = &sc->sc_ethercom.ec_if; 1460 1461 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 1462 idx = ENET_TX_NEXTIDX(idx)) { 1463 1464 /* txsoft[] is used only first segment */ 1465 txs = &sc->sc_txsoft[idx]; 1466 TXDESC_READIN(idx); 1467 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 1468 sc->sc_txdesc_ring[idx].tx_flags1_len = 0; 1469 bus_dmamap_unload(sc->sc_dmat, 1470 txs->txs_dmamap); 1471 m_freem(txs->txs_mbuf); 1472 1473 ifp->if_oerrors++; 1474 } 1475 sc->sc_tx_free++; 1476 } 1477 } 1478 1479 static void 1480 enet_drain_rxbuf(struct enet_softc *sc) 1481 { 1482 int i; 1483 1484 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1485 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) { 1486 sc->sc_rxdesc_ring[i].rx_flags1_len = 0; 1487 bus_dmamap_unload(sc->sc_dmat, 1488 sc->sc_rxsoft[i].rxs_dmamap); 1489 m_freem(sc->sc_rxsoft[i].rxs_mbuf); 1490 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1491 } 1492 } 1493 } 1494 1495 static int 1496 enet_alloc_ring(struct enet_softc *sc) 1497 { 1498 int i, error; 1499 1500 /* 1501 * build DMA maps for TX. 1502 * TX descriptor must be able to contain mbuf chains, 1503 * so, make up ENET_MAX_PKT_NSEGS dmamap. 1504 */ 1505 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1506 error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN, 1507 ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT, 1508 &sc->sc_txsoft[i].txs_dmamap); 1509 1510 if (error) { 1511 aprint_error_dev(sc->sc_dev, 1512 "can't create DMA map for TX descs\n"); 1513 goto fail_1; 1514 } 1515 } 1516 1517 /* 1518 * build DMA maps for RX. 1519 * RX descripter contains An mbuf cluster, 1520 * and make up a dmamap. 
1521 */ 1522 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1523 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1524 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1525 &sc->sc_rxsoft[i].rxs_dmamap); 1526 if (error) { 1527 aprint_error_dev(sc->sc_dev, 1528 "can't create DMA map for RX descs\n"); 1529 goto fail_2; 1530 } 1531 } 1532 1533 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1534 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1535 return -1; 1536 memset(sc->sc_txdesc_ring, 0, 1537 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1538 1539 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1540 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1541 return -1; 1542 memset(sc->sc_rxdesc_ring, 0, 1543 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1544 1545 return 0; 1546 1547 fail_2: 1548 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1549 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1550 bus_dmamap_destroy(sc->sc_dmat, 1551 sc->sc_rxsoft[i].rxs_dmamap); 1552 } 1553 fail_1: 1554 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1555 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1556 bus_dmamap_destroy(sc->sc_dmat, 1557 sc->sc_txsoft[i].txs_dmamap); 1558 } 1559 return error; 1560 } 1561 1562 static int 1563 enet_encap_mbufalign(struct mbuf **mp) 1564 { 1565 struct mbuf *m, *m0, *mt, *p, *x; 1566 void *ap; 1567 uint32_t alignoff, chiplen; 1568 1569 /* 1570 * iMX6 SoC ethernet controller requires 1571 * address of buffer must aligned 8, and 1572 * length of buffer must be greater than 10 (first fragment only?) 1573 */ 1574 #define ALIGNBYTE 8 1575 #define MINBUFSIZE 10 1576 #define ALIGN_PTR(p, align) \ 1577 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1578 1579 m0 = *mp; 1580 mt = p = NULL; 1581 for (m = m0; m != NULL; m = m->m_next) { 1582 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1583 if (m->m_len < (ALIGNBYTE * 2)) { 1584 /* 1585 * rearrange mbuf data aligned 1586 * 1587 * align 8 * * * * * 1588 * +0123456789abcdef0123456789abcdef0 1589 * FROM m->m_data[___________abcdefghijklmn_______] 1590 * 1591 * +0123456789abcdef0123456789abcdef0 1592 * TO m->m_data[________abcdefghijklm___________] or 1593 * m->m_data[________________abcdefghijklmn__] 1594 */ 1595 if ((alignoff != 0) && (m->m_len != 0)) { 1596 chiplen = ALIGNBYTE - alignoff; 1597 if (M_LEADINGSPACE(m) >= alignoff) { 1598 ap = m->m_data - alignoff; 1599 memmove(ap, m->m_data, m->m_len); 1600 m->m_data = ap; 1601 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1602 ap = m->m_data + chiplen; 1603 memmove(ap, m->m_data, m->m_len); 1604 m->m_data = ap; 1605 } else { 1606 /* 1607 * no space to align data. (M_READONLY?) 1608 * allocate new mbuf aligned, 1609 * and copy to it. 
1610 */ 1611 MGET(x, M_DONTWAIT, m->m_type); 1612 if (x == NULL) { 1613 m_freem(m); 1614 return ENOBUFS; 1615 } 1616 MCLAIM(x, m->m_owner); 1617 if (m->m_flags & M_PKTHDR) 1618 M_MOVE_PKTHDR(x, m); 1619 x->m_len = m->m_len; 1620 x->m_data = ALIGN_PTR(x->m_data, 1621 ALIGNBYTE); 1622 memcpy(mtod(x, void *), mtod(m, void *), 1623 m->m_len); 1624 p->m_next = x; 1625 MFREE(m, x->m_next); 1626 m = x; 1627 } 1628 } 1629 1630 /* 1631 * fill 1st mbuf at least 10byte 1632 * 1633 * align 8 * * * * * 1634 * +0123456789abcdef0123456789abcdef0 1635 * FROM m->m_data[________abcde___________________] 1636 * m->m_data[__fg____________________________] 1637 * m->m_data[_________________hi_____________] 1638 * m->m_data[__________jk____________________] 1639 * m->m_data[____l___________________________] 1640 * 1641 * +0123456789abcdef0123456789abcdef0 1642 * TO m->m_data[________abcdefghij______________] 1643 * m->m_data[________________________________] 1644 * m->m_data[________________________________] 1645 * m->m_data[___________k____________________] 1646 * m->m_data[____l___________________________] 1647 */ 1648 if (mt == NULL) { 1649 mt = m; 1650 while (mt->m_len == 0) { 1651 mt = mt->m_next; 1652 if (mt == NULL) { 1653 m_freem(m); 1654 return ENOBUFS; 1655 } 1656 } 1657 1658 /* mt = 1st mbuf, x = 2nd mbuf */ 1659 x = mt->m_next; 1660 while (mt->m_len < MINBUFSIZE) { 1661 if (x == NULL) { 1662 m_freem(m); 1663 return ENOBUFS; 1664 } 1665 1666 alignoff = (uintptr_t)x->m_data & 1667 (ALIGNBYTE - 1); 1668 chiplen = ALIGNBYTE - alignoff; 1669 if (chiplen > x->m_len) { 1670 chiplen = x->m_len; 1671 } else if ((mt->m_len + chiplen) < 1672 MINBUFSIZE) { 1673 /* 1674 * next mbuf should be greater 1675 * than ALIGNBYTE? 1676 */ 1677 if (x->m_len >= (chiplen + 1678 ALIGNBYTE * 2)) 1679 chiplen += ALIGNBYTE; 1680 else 1681 chiplen = x->m_len; 1682 } 1683 1684 if (chiplen && 1685 (M_TRAILINGSPACE(mt) < chiplen)) { 1686 /* 1687 * move data to the begining of 1688 * m_dat[] (aligned) to en- 1689 * large trailingspace 1690 */ 1691 if (mt->m_flags & M_EXT) { 1692 ap = mt->m_ext.ext_buf; 1693 } else if (mt->m_flags & 1694 M_PKTHDR) { 1695 ap = mt->m_pktdat; 1696 } else { 1697 ap = mt->m_dat; 1698 } 1699 ap = ALIGN_PTR(ap, ALIGNBYTE); 1700 memcpy(ap, mt->m_data, mt->m_len); 1701 mt->m_data = ap; 1702 } 1703 1704 if (chiplen && 1705 (M_TRAILINGSPACE(mt) >= chiplen)) { 1706 memcpy(mt->m_data + mt->m_len, 1707 x->m_data, chiplen); 1708 mt->m_len += chiplen; 1709 m_adj(x, chiplen); 1710 } 1711 1712 x = x->m_next; 1713 } 1714 } 1715 1716 } else { 1717 mt = m; 1718 1719 /* 1720 * allocate new mbuf x, and rearrange as below; 1721 * 1722 * align 8 * * * * * 1723 * +0123456789abcdef0123456789abcdef0 1724 * FROM m->m_data[____________abcdefghijklmnopq___] 1725 * 1726 * +0123456789abcdef0123456789abcdef0 1727 * TO x->m_data[________abcdefghijkl____________] 1728 * m->m_data[________________________mnopq___] 1729 * 1730 */ 1731 if (alignoff != 0) { 1732 /* at least ALIGNBYTE */ 1733 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1734 1735 MGET(x, M_DONTWAIT, m->m_type); 1736 if (x == NULL) { 1737 m_freem(m); 1738 return ENOBUFS; 1739 } 1740 MCLAIM(x, m->m_owner); 1741 if (m->m_flags & M_PKTHDR) 1742 M_MOVE_PKTHDR(x, m); 1743 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1744 memcpy(mtod(x, void *), mtod(m, void *), 1745 chiplen); 1746 x->m_len = chiplen; 1747 x->m_next = m; 1748 m_adj(m, chiplen); 1749 1750 if (p == NULL) 1751 m0 = x; 1752 else 1753 p->m_next = x; 1754 } 1755 } 1756 p = m; 1757 } 1758 *mp = m0; 1759 1760 return 0; 
1761 } 1762 1763 static int 1764 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1765 { 1766 bus_dmamap_t map; 1767 struct mbuf *m; 1768 int csumflags, idx, i, error; 1769 uint32_t flags1, flags2; 1770 1771 idx = sc->sc_tx_prodidx; 1772 map = sc->sc_txsoft[idx].txs_dmamap; 1773 1774 /* align mbuf data for claim of ENET */ 1775 error = enet_encap_mbufalign(mp); 1776 if (error != 0) 1777 return error; 1778 1779 m = *mp; 1780 csumflags = m->m_pkthdr.csum_flags; 1781 1782 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1783 BUS_DMA_NOWAIT); 1784 if (error != 0) { 1785 device_printf(sc->sc_dev, 1786 "Error mapping mbuf into TX chain: error=%d\n", error); 1787 m_freem(m); 1788 return error; 1789 } 1790 1791 if (map->dm_nsegs > sc->sc_tx_free) { 1792 bus_dmamap_unload(sc->sc_dmat, map); 1793 device_printf(sc->sc_dev, 1794 "too many mbuf chain %d\n", map->dm_nsegs); 1795 m_freem(m); 1796 return ENOBUFS; 1797 } 1798 1799 /* fill protocol cksum zero beforehand */ 1800 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1801 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1802 struct mbuf *m1; 1803 int ehlen, moff; 1804 uint16_t etype; 1805 1806 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1807 switch (ntohs(etype)) { 1808 case ETHERTYPE_IP: 1809 case ETHERTYPE_IPV6: 1810 ehlen = ETHER_HDR_LEN; 1811 break; 1812 case ETHERTYPE_VLAN: 1813 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1814 break; 1815 default: 1816 ehlen = 0; 1817 break; 1818 } 1819 1820 if (ehlen) { 1821 m1 = m_getptr(m, ehlen + 1822 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1823 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data), 1824 &moff); 1825 if (m1 != NULL) 1826 *(uint16_t *)(mtod(m1, char *) + moff) = 0; 1827 } 1828 } 1829 1830 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1831 BUS_DMASYNC_PREWRITE); 1832 1833 for (i = 0; i < map->dm_nsegs; i++) { 1834 flags1 = TXFLAGS1_R; 1835 flags2 = 0; 1836 1837 if (i == 0) { 1838 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1839 sc->sc_txsoft[idx].txs_mbuf = m; 1840 } 1841 1842 /* checksum offloading */ 1843 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1844 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1845 flags2 |= TXFLAGS2_PINS; 1846 if (csumflags & (M_CSUM_IPv4)) 1847 flags2 |= TXFLAGS2_IINS; 1848 1849 if (i == map->dm_nsegs - 1) { 1850 /* mark last segment */ 1851 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1852 flags2 |= TXFLAGS2_INT; 1853 } 1854 if (idx == ENET_TX_RING_CNT - 1) { 1855 /* mark end of ring */ 1856 flags1 |= TXFLAGS1_W; 1857 } 1858 1859 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1860 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1861 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1862 sc->sc_txdesc_ring[idx].tx_flags1_len = 1863 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1864 1865 TXDESC_WRITEOUT(idx); 1866 1867 idx = ENET_TX_NEXTIDX(idx); 1868 sc->sc_tx_free--; 1869 } 1870 1871 sc->sc_tx_prodidx = idx; 1872 1873 return 0; 1874 } 1875 1876 /* 1877 * device initialize 1878 */ 1879 static int 1880 enet_init_plls(struct enet_softc *sc) 1881 { 1882 #if NIMXCCM > 0 1883 /* PLL power up */ 1884 if (imx6_pll_power(CCM_ANALOG_PLL_ENET, 1) != 0) { 1885 aprint_error_dev(sc->sc_dev, 1886 "couldn't enable CCM_ANALOG_PLL_ENET\n"); 1887 return -1; 1888 } 1889 #endif 1890 1891 return 0; 1892 } 1893 1894 static int 1895 enet_init_regs(struct enet_softc *sc, int init) 1896 { 1897 struct mii_data *mii; 1898 struct ifmedia_entry *ife; 1899 paddr_t paddr; 1900 uint32_t val; 1901 int fulldup, ecr_speed, rcr_speed, flowctrl; 1902 1903 if (init) { 1904 fulldup = 1; 1905 ecr_speed 
= ENET_ECR_SPEED; 1906 rcr_speed = 0; 1907 flowctrl = 0; 1908 } else { 1909 mii = &sc->sc_mii; 1910 ife = mii->mii_media.ifm_cur; 1911 1912 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) 1913 fulldup = 1; 1914 else 1915 fulldup = 0; 1916 1917 switch (IFM_SUBTYPE(ife->ifm_media)) { 1918 case IFM_10_T: 1919 ecr_speed = 0; 1920 rcr_speed = ENET_RCR_RMII_10T; 1921 break; 1922 case IFM_100_TX: 1923 ecr_speed = 0; 1924 rcr_speed = 0; 1925 break; 1926 default: 1927 ecr_speed = ENET_ECR_SPEED; 1928 rcr_speed = 0; 1929 break; 1930 } 1931 1932 flowctrl = sc->sc_flowflags & IFM_FLOW; 1933 } 1934 1935 /* reset */ 1936 ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET); 1937 1938 /* mask and clear all interrupt */ 1939 ENET_REG_WRITE(sc, ENET_EIMR, 0); 1940 ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff); 1941 1942 /* full duplex */ 1943 ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0); 1944 1945 /* clear and enable MIB register */ 1946 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 1947 ENET_REG_WRITE(sc, ENET_MIBC, 0); 1948 1949 /* MII speed setup. MDCclk(=2.5MHz) = PLL6clk/((val+1)*2) */ 1950 val = (imx6_get_clock(IMX6CLK_PLL6) / 500000 - 1) / 10; 1951 ENET_REG_WRITE(sc, ENET_MSCR, val); 1952 1953 /* Opcode/Pause Duration */ 1954 ENET_REG_WRITE(sc, ENET_OPD, 0x00010020); 1955 1956 /* Receive FIFO */ 1957 ENET_REG_WRITE(sc, ENET_RSFL, 16); /* RxFIFO Section Full */ 1958 ENET_REG_WRITE(sc, ENET_RSEM, 0x84); /* RxFIFO Section Empty */ 1959 ENET_REG_WRITE(sc, ENET_RAEM, 8); /* RxFIFO Almost Empty */ 1960 ENET_REG_WRITE(sc, ENET_RAFL, 8); /* RxFIFO Almost Full */ 1961 1962 /* Transmit FIFO */ 1963 ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD | 1964 ENET_TFWR_FIFO(128)); /* TxFIFO Watermark */ 1965 ENET_REG_WRITE(sc, ENET_TSEM, 0); /* TxFIFO Section Empty */ 1966 ENET_REG_WRITE(sc, ENET_TAEM, 256); /* TxFIFO Almost Empty */ 1967 ENET_REG_WRITE(sc, ENET_TAFL, 8); /* TxFIFO Almost Full */ 1968 ENET_REG_WRITE(sc, ENET_TIPG, 12); /* Tx Inter-Packet Gap */ 1969 1970 /* hardware checksum is default off (override in TX descripter) */ 1971 ENET_REG_WRITE(sc, ENET_TACC, 0); 1972 1973 /* 1974 * align ethernet payload on 32bit, discard frames with MAC layer error, 1975 * and don't discard checksum error 1976 */ 1977 ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS); 1978 1979 /* maximum frame size */ 1980 val = ENET_DEFAULT_PKT_LEN; 1981 ENET_REG_WRITE(sc, ENET_FTRL, val); /* Frame Truncation Length */ 1982 ENET_REG_WRITE(sc, ENET_RCR, 1983 ENET_RCR_PADEN | /* RX frame padding remove */ 1984 ENET_RCR_RGMII_EN | /* use RGMII */ 1985 (flowctrl ? ENET_RCR_FCE : 0) | /* flow control enable */ 1986 rcr_speed | 1987 (fulldup ? 
0 : ENET_RCR_DRT) | 1988 ENET_RCR_MAX_FL(val)); 1989 1990 /* Maximum Receive BufSize per one descriptor */ 1991 ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE); 1992 1993 1994 /* TX/RX Descriptor Physical Address */ 1995 paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr; 1996 ENET_REG_WRITE(sc, ENET_TDSR, paddr); 1997 paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr; 1998 ENET_REG_WRITE(sc, ENET_RDSR, paddr); 1999 /* sync cache */ 2000 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0, 2001 sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2002 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0, 2003 sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2004 2005 /* enable interrupts */ 2006 ENET_REG_WRITE(sc, ENET_EIMR, 2007 ENET_EIR_TXF | 2008 ENET_EIR_RXF | 2009 ENET_EIR_EBERR | 2010 0); 2011 2012 /* enable ether */ 2013 ENET_REG_WRITE(sc, ENET_ECR, 2014 #if _BYTE_ORDER == _LITTLE_ENDIAN 2015 ENET_ECR_DBSWP | 2016 #endif 2017 ENET_ECR_SPEED | /* default 1000Mbps mode */ 2018 ENET_ECR_EN1588 | /* use enhanced TX/RX descriptor */ 2019 ENET_ECR_ETHEREN); /* Ethernet Enable */ 2020 2021 return 0; 2022 } 2023 2024 static int 2025 enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp, 2026 bus_dmamap_t *mapp) 2027 { 2028 bus_dma_segment_t seglist[1]; 2029 int nsegs, error; 2030 2031 if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist, 2032 1, &nsegs, M_NOWAIT)) != 0) { 2033 device_printf(sc->sc_dev, 2034 "unable to allocate DMA buffer, error=%d\n", error); 2035 goto fail_alloc; 2036 } 2037 2038 if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp, 2039 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 2040 device_printf(sc->sc_dev, 2041 "unable to map DMA buffer, error=%d\n", 2042 error); 2043 goto fail_map; 2044 } 2045 2046 if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 2047 BUS_DMA_NOWAIT, mapp)) != 0) { 2048 device_printf(sc->sc_dev, 2049 "unable to create DMA map, error=%d\n", error); 2050 goto fail_create; 2051 } 2052 2053 if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL, 2054 BUS_DMA_NOWAIT)) != 0) { 2055 aprint_error_dev(sc->sc_dev, 2056 "unable to load DMA map, error=%d\n", error); 2057 goto fail_load; 2058 } 2059 2060 return 0; 2061 2062 fail_load: 2063 bus_dmamap_destroy(sc->sc_dmat, *mapp); 2064 fail_create: 2065 bus_dmamem_unmap(sc->sc_dmat, *addrp, size); 2066 fail_map: 2067 bus_dmamem_free(sc->sc_dmat, seglist, 1); 2068 fail_alloc: 2069 return error; 2070 } 2071