1 /* $NetBSD: if_enet.c,v 1.15 2018/09/03 16:29:23 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET) 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.15 2018/09/03 16:29:23 riastradh Exp $"); 35 36 #include "vlan.h" 37 38 #include <sys/param.h> 39 #include <sys/bus.h> 40 #include <sys/mbuf.h> 41 #include <sys/device.h> 42 #include <sys/sockio.h> 43 #include <sys/kernel.h> 44 #include <sys/rndsource.h> 45 46 #include <lib/libkern/libkern.h> 47 48 #include <net/if.h> 49 #include <net/if_dl.h> 50 #include <net/if_media.h> 51 #include <net/if_ether.h> 52 #include <net/bpf.h> 53 #include <net/if_vlanvar.h> 54 55 #include <netinet/in.h> 56 #include <netinet/in_systm.h> 57 #include <netinet/ip.h> 58 59 #include <dev/mii/mii.h> 60 #include <dev/mii/miivar.h> 61 62 #include <arm/imx/if_enetreg.h> 63 #include <arm/imx/if_enetvar.h> 64 65 #undef DEBUG_ENET 66 #undef ENET_EVENT_COUNTER 67 68 #define ENET_TICK hz 69 70 #ifdef DEBUG_ENET 71 int enet_debug = 0; 72 # define DEVICE_DPRINTF(args...) \ 73 do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0) 74 #else 75 # define DEVICE_DPRINTF(args...) 76 #endif 77 78 79 #define RXDESC_MAXBUFSIZE 0x07f0 80 /* ENET does not work greather than 0x0800... */ 81 82 #undef ENET_SUPPORT_JUMBO /* JUMBO FRAME SUPPORT is unstable */ 83 #ifdef ENET_SUPPORT_JUMBO 84 # define ENET_MAX_PKT_LEN 4034 /* MAX FIFO LEN */ 85 #else 86 # define ENET_MAX_PKT_LEN 1522 87 #endif 88 #define ENET_DEFAULT_PKT_LEN 1522 /* including VLAN tag */ 89 #define MTU2FRAMESIZE(n) \ 90 ((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) 91 92 93 #define ENET_MAX_PKT_NSEGS 64 94 95 #define ENET_TX_NEXTIDX(idx) \ 96 (((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1)) 97 #define ENET_RX_NEXTIDX(idx) \ 98 (((idx) >= (ENET_RX_RING_CNT - 1)) ? 
0 : ((idx) + 1)) 99 100 #define TXDESC_WRITEOUT(idx) \ 101 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 sizeof(struct enet_txdesc) * (idx), \ 103 sizeof(struct enet_txdesc), \ 104 BUS_DMASYNC_PREWRITE) 105 106 #define TXDESC_READIN(idx) \ 107 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 sizeof(struct enet_txdesc) * (idx), \ 109 sizeof(struct enet_txdesc), \ 110 BUS_DMASYNC_PREREAD) 111 112 #define RXDESC_WRITEOUT(idx) \ 113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 sizeof(struct enet_rxdesc) * (idx), \ 115 sizeof(struct enet_rxdesc), \ 116 BUS_DMASYNC_PREWRITE) 117 118 #define RXDESC_READIN(idx) \ 119 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 sizeof(struct enet_rxdesc) * (idx), \ 121 sizeof(struct enet_rxdesc), \ 122 BUS_DMASYNC_PREREAD) 123 124 #define ENET_REG_READ(sc, reg) \ 125 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 127 #define ENET_REG_WRITE(sc, reg, value) \ 128 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 130 #ifdef ENET_EVENT_COUNTER 131 static void enet_attach_evcnt(struct enet_softc *); 132 static void enet_update_evcnt(struct enet_softc *); 133 #endif 134 135 static int enet_intr(void *); 136 static void enet_tick(void *); 137 static int enet_tx_intr(void *); 138 static int enet_rx_intr(void *); 139 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 140 int); 141 142 static void enet_start(struct ifnet *); 143 static int enet_ifflags_cb(struct ethercom *); 144 static int enet_ioctl(struct ifnet *, u_long, void *); 145 static int enet_init(struct ifnet *); 146 static void enet_stop(struct ifnet *, int); 147 static void enet_watchdog(struct ifnet *); 148 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 149 150 static int enet_miibus_readreg(device_t, int, int); 151 static void enet_miibus_writereg(device_t, int, int, int); 152 static void enet_miibus_statchg(struct ifnet *); 153 154 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 155 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 156 static void enet_setmulti(struct enet_softc *); 157 static int enet_encap_mbufalign(struct mbuf **); 158 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 159 static int enet_init_regs(struct enet_softc *, int); 160 static int enet_alloc_ring(struct enet_softc *); 161 static void enet_init_txring(struct enet_softc *); 162 static int enet_init_rxring(struct enet_softc *); 163 static void enet_reset_rxdesc(struct enet_softc *, int); 164 static int enet_alloc_rxbuf(struct enet_softc *, int); 165 static void enet_drain_txbuf(struct enet_softc *); 166 static void enet_drain_rxbuf(struct enet_softc *); 167 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 168 bus_dmamap_t *); 169 170 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 171 enet_match, enet_attach, NULL, NULL); 172 173 void 174 enet_attach_common(device_t self, bus_space_tag_t iot, 175 bus_dma_tag_t dmat, bus_addr_t addr, bus_size_t size, int irq) 176 { 177 struct enet_softc *sc; 178 struct ifnet *ifp; 179 180 sc = device_private(self); 181 sc->sc_dev = self; 182 sc->sc_iot = iot; 183 sc->sc_addr = addr; 184 sc->sc_dmat = dmat; 185 186 aprint_naive("\n"); 187 aprint_normal(": Gigabit Ethernet Controller\n"); 188 if (bus_space_map(sc->sc_iot, sc->sc_addr, size, 0, 189 &sc->sc_ioh)) { 190 aprint_error_dev(self, "cannot map registers\n"); 191 return; 192 } 193 194 /* allocate dma buffer */ 195 if (enet_alloc_ring(sc)) 196 return; 197 198 #define 
IS_ENADDR_ZERO(enaddr) \ 199 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 200 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 201 202 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 203 /* by any chance, mac-address is already set by bootloader? */ 204 enet_gethwaddr(sc, sc->sc_enaddr); 205 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 206 /* give up. set randomly */ 207 uint32_t eaddr = random(); 208 /* not multicast */ 209 sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc; 210 sc->sc_enaddr[1] = eaddr >> 16; 211 sc->sc_enaddr[2] = eaddr >> 8; 212 sc->sc_enaddr[3] = eaddr; 213 eaddr = random(); 214 sc->sc_enaddr[4] = eaddr >> 8; 215 sc->sc_enaddr[5] = eaddr; 216 217 aprint_error_dev(self, 218 "cannot get mac address. set randomly\n"); 219 } 220 } 221 enet_sethwaddr(sc, sc->sc_enaddr); 222 223 aprint_normal_dev(self, "Ethernet address %s\n", 224 ether_sprintf(sc->sc_enaddr)); 225 226 enet_init_regs(sc, 1); 227 228 /* setup interrupt handlers */ 229 if ((sc->sc_ih = intr_establish(irq, IPL_NET, 230 IST_LEVEL, enet_intr, sc)) == NULL) { 231 aprint_error_dev(self, "unable to establish interrupt\n"); 232 goto failure; 233 } 234 235 if (sc->sc_imxtype == 7) { 236 /* i.MX7 use 3 interrupts */ 237 if ((sc->sc_ih2 = intr_establish(irq + 1, IPL_NET, 238 IST_LEVEL, enet_intr, sc)) == NULL) { 239 aprint_error_dev(self, 240 "unable to establish 2nd interrupt\n"); 241 intr_disestablish(sc->sc_ih); 242 goto failure; 243 } 244 if ((sc->sc_ih3 = intr_establish(irq + 2, IPL_NET, 245 IST_LEVEL, enet_intr, sc)) == NULL) { 246 aprint_error_dev(self, 247 "unable to establish 3rd interrupt\n"); 248 intr_disestablish(sc->sc_ih2); 249 intr_disestablish(sc->sc_ih); 250 goto failure; 251 } 252 } 253 254 /* callout will be scheduled from enet_init() */ 255 callout_init(&sc->sc_tick_ch, 0); 256 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 257 258 /* setup ifp */ 259 ifp = &sc->sc_ethercom.ec_if; 260 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 261 ifp->if_softc = sc; 262 ifp->if_mtu = ETHERMTU; 263 ifp->if_baudrate = IF_Gbps(1); 264 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 265 ifp->if_ioctl = enet_ioctl; 266 ifp->if_start = enet_start; 267 ifp->if_init = enet_init; 268 ifp->if_stop = enet_stop; 269 ifp->if_watchdog = enet_watchdog; 270 271 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 272 #ifdef ENET_SUPPORT_JUMBO 273 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 274 #endif 275 276 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 277 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 278 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 279 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 280 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 281 282 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN)); 283 IFQ_SET_READY(&ifp->if_snd); 284 285 /* setup MII */ 286 sc->sc_ethercom.ec_mii = &sc->sc_mii; 287 sc->sc_mii.mii_ifp = ifp; 288 sc->sc_mii.mii_readreg = enet_miibus_readreg; 289 sc->sc_mii.mii_writereg = enet_miibus_writereg; 290 sc->sc_mii.mii_statchg = enet_miibus_statchg; 291 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 292 enet_mediastatus); 293 294 /* try to attach PHY */ 295 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 296 MII_OFFSET_ANY, 0); 297 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 298 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 299 0, NULL); 300 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 301 } else { 302 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 303 } 304 305 if_attach(ifp); 306 ether_ifattach(ifp, 
sc->sc_enaddr); 307 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 308 309 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 310 RND_TYPE_NET, RND_FLAG_DEFAULT); 311 312 #ifdef ENET_EVENT_COUNTER 313 enet_attach_evcnt(sc); 314 #endif 315 316 sc->sc_stopping = false; 317 318 return; 319 320 failure: 321 bus_space_unmap(sc->sc_iot, sc->sc_ioh, size); 322 return; 323 } 324 325 #ifdef ENET_EVENT_COUNTER 326 static void 327 enet_attach_evcnt(struct enet_softc *sc) 328 { 329 const char *xname; 330 331 xname = device_xname(sc->sc_dev); 332 333 #define ENET_EVCNT_ATTACH(name) \ 334 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 335 NULL, xname, #name); 336 337 ENET_EVCNT_ATTACH(t_drop); 338 ENET_EVCNT_ATTACH(t_packets); 339 ENET_EVCNT_ATTACH(t_bc_pkt); 340 ENET_EVCNT_ATTACH(t_mc_pkt); 341 ENET_EVCNT_ATTACH(t_crc_align); 342 ENET_EVCNT_ATTACH(t_undersize); 343 ENET_EVCNT_ATTACH(t_oversize); 344 ENET_EVCNT_ATTACH(t_frag); 345 ENET_EVCNT_ATTACH(t_jab); 346 ENET_EVCNT_ATTACH(t_col); 347 ENET_EVCNT_ATTACH(t_p64); 348 ENET_EVCNT_ATTACH(t_p65to127n); 349 ENET_EVCNT_ATTACH(t_p128to255n); 350 ENET_EVCNT_ATTACH(t_p256to511); 351 ENET_EVCNT_ATTACH(t_p512to1023); 352 ENET_EVCNT_ATTACH(t_p1024to2047); 353 ENET_EVCNT_ATTACH(t_p_gte2048); 354 ENET_EVCNT_ATTACH(t_octets); 355 ENET_EVCNT_ATTACH(r_packets); 356 ENET_EVCNT_ATTACH(r_bc_pkt); 357 ENET_EVCNT_ATTACH(r_mc_pkt); 358 ENET_EVCNT_ATTACH(r_crc_align); 359 ENET_EVCNT_ATTACH(r_undersize); 360 ENET_EVCNT_ATTACH(r_oversize); 361 ENET_EVCNT_ATTACH(r_frag); 362 ENET_EVCNT_ATTACH(r_jab); 363 ENET_EVCNT_ATTACH(r_p64); 364 ENET_EVCNT_ATTACH(r_p65to127); 365 ENET_EVCNT_ATTACH(r_p128to255); 366 ENET_EVCNT_ATTACH(r_p256to511); 367 ENET_EVCNT_ATTACH(r_p512to1023); 368 ENET_EVCNT_ATTACH(r_p1024to2047); 369 ENET_EVCNT_ATTACH(r_p_gte2048); 370 ENET_EVCNT_ATTACH(r_octets); 371 } 372 373 static void 374 enet_update_evcnt(struct enet_softc *sc) 375 { 376 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 377 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 378 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 379 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 380 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 381 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 382 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 383 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 384 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 385 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 386 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 387 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 388 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 389 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 390 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 391 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 392 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 393 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 394 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 395 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 396 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 397 
sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 398 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 399 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 400 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 401 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 402 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 403 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 404 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 405 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 406 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 407 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 408 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 409 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 410 } 411 #endif /* ENET_EVENT_COUNTER */ 412 413 static void 414 enet_tick(void *arg) 415 { 416 struct enet_softc *sc; 417 struct mii_data *mii; 418 struct ifnet *ifp; 419 int s; 420 421 sc = arg; 422 mii = &sc->sc_mii; 423 ifp = &sc->sc_ethercom.ec_if; 424 425 s = splnet(); 426 427 if (sc->sc_stopping) 428 goto out; 429 430 #ifdef ENET_EVENT_COUNTER 431 enet_update_evcnt(sc); 432 #endif 433 434 /* update counters */ 435 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 436 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 437 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 438 439 /* clear counters */ 440 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 441 ENET_REG_WRITE(sc, ENET_MIBC, 0); 442 443 mii_tick(mii); 444 out: 445 446 if (!sc->sc_stopping) 447 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 448 449 splx(s); 450 } 451 452 static int 453 enet_intr(void *arg) 454 { 455 struct enet_softc *sc; 456 struct ifnet *ifp; 457 uint32_t status; 458 459 sc = arg; 460 status = ENET_REG_READ(sc, ENET_EIR); 461 462 if (sc->sc_imxtype == 7) { 463 if (status & (ENET_EIR_TXF|ENET_EIR_TXF1|ENET_EIR_TXF2)) 464 enet_tx_intr(arg); 465 if (status & (ENET_EIR_RXF|ENET_EIR_RXF1|ENET_EIR_RXF2)) 466 enet_rx_intr(arg); 467 } else { 468 if (status & ENET_EIR_TXF) 469 enet_tx_intr(arg); 470 if (status & ENET_EIR_RXF) 471 enet_rx_intr(arg); 472 } 473 474 if (status & ENET_EIR_EBERR) { 475 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 476 ifp = &sc->sc_ethercom.ec_if; 477 enet_stop(ifp, 1); 478 enet_init(ifp); 479 } else { 480 ENET_REG_WRITE(sc, ENET_EIR, status); 481 } 482 483 rnd_add_uint32(&sc->sc_rnd_source, status); 484 485 return 1; 486 } 487 488 static int 489 enet_tx_intr(void *arg) 490 { 491 struct enet_softc *sc; 492 struct ifnet *ifp; 493 struct enet_txsoft *txs; 494 int idx; 495 496 sc = (struct enet_softc *)arg; 497 ifp = &sc->sc_ethercom.ec_if; 498 499 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 500 idx = ENET_TX_NEXTIDX(idx)) { 501 502 txs = &sc->sc_txsoft[idx]; 503 504 TXDESC_READIN(idx); 505 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 506 /* This TX Descriptor has not been transmitted yet */ 507 break; 508 } 509 510 /* txsoft is available on first segment (TXFLAGS1_T1) */ 511 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 512 bus_dmamap_unload(sc->sc_dmat, 513 txs->txs_dmamap); 514 m_freem(txs->txs_mbuf); 515 ifp->if_opackets++; 516 } 517 518 /* checking error */ 519 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 520 uint32_t flags2; 
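			/*
			 * The hardware reports per-frame transmit status in
			 * tx_flags2 of the descriptor flagged TXFLAGS1_L
			 * (last buffer of the frame); the error bits tested
			 * below are counted as if_oerrors.
			 */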
521 522 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 523 524 if (flags2 & (TXFLAGS2_TXE | 525 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 526 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 527 #ifdef DEBUG_ENET 528 if (enet_debug) { 529 char flagsbuf[128]; 530 531 snprintb(flagsbuf, sizeof(flagsbuf), 532 "\20" "\20TRANSMIT" "\16UNDERFLOW" 533 "\15COLLISION" "\14FRAME" 534 "\13LATECOLLISION" "\12OVERFLOW", 535 flags2); 536 537 device_printf(sc->sc_dev, 538 "txdesc[%d]: transmit error: " 539 "flags2=%s\n", idx, flagsbuf); 540 } 541 #endif /* DEBUG_ENET */ 542 ifp->if_oerrors++; 543 } 544 } 545 546 sc->sc_tx_free++; 547 } 548 sc->sc_tx_considx = idx; 549 550 if (sc->sc_tx_free > 0) 551 ifp->if_flags &= ~IFF_OACTIVE; 552 553 /* 554 * No more pending TX descriptor, 555 * cancel the watchdog timer. 556 */ 557 if (sc->sc_tx_free == ENET_TX_RING_CNT) 558 ifp->if_timer = 0; 559 560 return 1; 561 } 562 563 static int 564 enet_rx_intr(void *arg) 565 { 566 struct enet_softc *sc; 567 struct ifnet *ifp; 568 struct enet_rxsoft *rxs; 569 int idx, len, amount; 570 uint32_t flags1, flags2; 571 struct mbuf *m, *m0, *mprev; 572 573 sc = arg; 574 ifp = &sc->sc_ethercom.ec_if; 575 576 m0 = mprev = NULL; 577 amount = 0; 578 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 579 580 rxs = &sc->sc_rxsoft[idx]; 581 582 RXDESC_READIN(idx); 583 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 584 /* This RX Descriptor has not been received yet */ 585 break; 586 } 587 588 /* 589 * build mbuf from RX Descriptor if needed 590 */ 591 m = rxs->rxs_mbuf; 592 rxs->rxs_mbuf = NULL; 593 594 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 595 len = RXFLAGS1_LEN(flags1); 596 597 #define RACC_SHIFT16 2 598 if (m0 == NULL) { 599 m0 = m; 600 m_adj(m0, RACC_SHIFT16); 601 len -= RACC_SHIFT16; 602 m->m_len = len; 603 amount = len; 604 } else { 605 if (flags1 & RXFLAGS1_L) 606 len = len - amount - RACC_SHIFT16; 607 608 m->m_len = len; 609 amount += len; 610 if (m->m_flags & M_PKTHDR) 611 m_remove_pkthdr(m); 612 mprev->m_next = m; 613 } 614 mprev = m; 615 616 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 617 618 if (flags1 & RXFLAGS1_L) { 619 /* last buffer */ 620 if ((amount < ETHER_HDR_LEN) || 621 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 622 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 623 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 624 RXFLAGS2_CE)))) { 625 626 #ifdef DEBUG_ENET 627 if (enet_debug) { 628 char flags1buf[128], flags2buf[128]; 629 snprintb(flags1buf, sizeof(flags1buf), 630 "\20" "\31MISS" "\26LENGTHVIOLATION" 631 "\25NONOCTET" "\23CRC" "\22OVERRUN" 632 "\21TRUNCATED", flags1); 633 snprintb(flags2buf, sizeof(flags2buf), 634 "\20" "\40MAC" "\33PHY" 635 "\32COLLISION", flags2); 636 637 DEVICE_DPRINTF( 638 "rxdesc[%d]: receive error: " 639 "flags1=%s,flags2=%s,len=%d\n", 640 idx, flags1buf, flags2buf, amount); 641 } 642 #endif /* DEBUG_ENET */ 643 ifp->if_ierrors++; 644 m_freem(m0); 645 646 } else { 647 /* packet receive ok */ 648 m_set_rcvif(m0, ifp); 649 m0->m_pkthdr.len = amount; 650 651 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 652 rxs->rxs_dmamap->dm_mapsize, 653 BUS_DMASYNC_PREREAD); 654 655 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 656 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 657 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 658 enet_rx_csum(sc, ifp, m0, idx); 659 660 if_percpuq_enqueue(ifp->if_percpuq, m0); 661 } 662 663 m0 = NULL; 664 mprev = NULL; 665 amount = 0; 666 667 } else { 668 /* continued from previous buffer */ 669 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 670 rxs->rxs_dmamap->dm_mapsize, 
			    BUS_DMASYNC_PREREAD);
		}

		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
		if (enet_alloc_rxbuf(sc, idx) != 0) {
			panic("enet_alloc_rxbuf NULL\n");
		}
	}
	sc->sc_rx_readidx = idx;

	/* re-enable RX DMA to make sure */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	return 1;
}

static void
enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx)
{
	uint32_t flags2;
	uint8_t proto;

	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

	if (flags2 & RXFLAGS2_IPV6) {
		proto = sc->sc_rxdesc_ring[idx].rx_proto;

		/* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */
		if ((proto == IPPROTO_TCP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		else if ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is an IPv4? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ifnet *ifp;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	ifp = &sc->sc_ethercom.ec_if;

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0] = paddr >> 24;
	hwaddr[1] =
paddr >> 16; 812 hwaddr[2] = paddr >> 8; 813 hwaddr[3] = paddr; 814 815 paddr = ENET_REG_READ(sc, ENET_PAUR); 816 hwaddr[4] = paddr >> 24; 817 hwaddr[5] = paddr >> 16; 818 } 819 820 static void 821 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 822 { 823 uint32_t paddr; 824 825 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 826 hwaddr[3]; 827 ENET_REG_WRITE(sc, ENET_PALR, paddr); 828 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 829 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 830 } 831 832 /* 833 * ifnet interfaces 834 */ 835 static int 836 enet_init(struct ifnet *ifp) 837 { 838 struct enet_softc *sc; 839 int s, error; 840 841 sc = ifp->if_softc; 842 843 s = splnet(); 844 845 enet_init_regs(sc, 0); 846 enet_init_txring(sc); 847 error = enet_init_rxring(sc); 848 if (error != 0) { 849 enet_drain_rxbuf(sc); 850 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 851 goto init_failure; 852 } 853 854 /* reload mac address */ 855 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 856 enet_sethwaddr(sc, sc->sc_enaddr); 857 858 /* program multicast address */ 859 enet_setmulti(sc); 860 861 /* update if_flags */ 862 ifp->if_flags |= IFF_RUNNING; 863 ifp->if_flags &= ~IFF_OACTIVE; 864 865 /* update local copy of if_flags */ 866 sc->sc_if_flags = ifp->if_flags; 867 868 /* mii */ 869 mii_mediachg(&sc->sc_mii); 870 871 /* enable RX DMA */ 872 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 873 874 sc->sc_stopping = false; 875 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 876 877 init_failure: 878 splx(s); 879 880 return error; 881 } 882 883 static void 884 enet_start(struct ifnet *ifp) 885 { 886 struct enet_softc *sc; 887 struct mbuf *m; 888 int npkt; 889 890 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 891 return; 892 893 sc = ifp->if_softc; 894 for (npkt = 0; ; npkt++) { 895 IFQ_POLL(&ifp->if_snd, m); 896 if (m == NULL) 897 break; 898 899 if (sc->sc_tx_free <= 0) { 900 /* no tx descriptor now... */ 901 ifp->if_flags |= IFF_OACTIVE; 902 DEVICE_DPRINTF("TX descriptor is full\n"); 903 break; 904 } 905 906 IFQ_DEQUEUE(&ifp->if_snd, m); 907 908 if (enet_encap_txring(sc, &m) != 0) { 909 /* too many mbuf chains? */ 910 ifp->if_flags |= IFF_OACTIVE; 911 DEVICE_DPRINTF( 912 "TX descriptor is full. dropping packet\n"); 913 m_freem(m); 914 ifp->if_oerrors++; 915 break; 916 } 917 918 /* Pass the packet to any BPF listeners */ 919 bpf_mtap(ifp, m, BPF_D_OUT); 920 } 921 922 if (npkt) { 923 /* enable TX DMA */ 924 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 925 926 ifp->if_timer = 5; 927 } 928 } 929 930 static void 931 enet_stop(struct ifnet *ifp, int disable) 932 { 933 struct enet_softc *sc; 934 int s; 935 uint32_t v; 936 937 sc = ifp->if_softc; 938 939 s = splnet(); 940 941 sc->sc_stopping = true; 942 callout_stop(&sc->sc_tick_ch); 943 944 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 945 v = ENET_REG_READ(sc, ENET_ECR); 946 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 947 948 /* Mark the interface as down and cancel the watchdog timer. 
*/ 949 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 950 ifp->if_timer = 0; 951 952 if (disable) { 953 enet_drain_txbuf(sc); 954 enet_drain_rxbuf(sc); 955 } 956 957 splx(s); 958 } 959 960 static void 961 enet_watchdog(struct ifnet *ifp) 962 { 963 struct enet_softc *sc; 964 int s; 965 966 sc = ifp->if_softc; 967 s = splnet(); 968 969 device_printf(sc->sc_dev, "watchdog timeout\n"); 970 ifp->if_oerrors++; 971 972 /* salvage packets left in descriptors */ 973 enet_tx_intr(sc); 974 enet_rx_intr(sc); 975 976 /* reset */ 977 enet_stop(ifp, 1); 978 enet_init(ifp); 979 980 splx(s); 981 } 982 983 static void 984 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 985 { 986 struct enet_softc *sc = ifp->if_softc; 987 988 ether_mediastatus(ifp, ifmr); 989 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 990 | sc->sc_flowflags; 991 } 992 993 static int 994 enet_ifflags_cb(struct ethercom *ec) 995 { 996 struct ifnet *ifp = &ec->ec_if; 997 struct enet_softc *sc = ifp->if_softc; 998 int change = ifp->if_flags ^ sc->sc_if_flags; 999 1000 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1001 return ENETRESET; 1002 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 1003 return 0; 1004 1005 enet_setmulti(sc); 1006 1007 sc->sc_if_flags = ifp->if_flags; 1008 return 0; 1009 } 1010 1011 static int 1012 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 1013 { 1014 struct enet_softc *sc; 1015 struct ifreq *ifr; 1016 int s, error; 1017 uint32_t v; 1018 1019 sc = ifp->if_softc; 1020 ifr = data; 1021 1022 error = 0; 1023 1024 s = splnet(); 1025 1026 switch (command) { 1027 case SIOCSIFMTU: 1028 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 1029 error = EINVAL; 1030 } else { 1031 ifp->if_mtu = ifr->ifr_mtu; 1032 1033 /* set maximum frame length */ 1034 v = MTU2FRAMESIZE(ifr->ifr_mtu); 1035 ENET_REG_WRITE(sc, ENET_FTRL, v); 1036 v = ENET_REG_READ(sc, ENET_RCR); 1037 v &= ~ENET_RCR_MAX_FL(0x3fff); 1038 v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 1039 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1040 ENET_REG_WRITE(sc, ENET_RCR, v); 1041 } 1042 break; 1043 case SIOCSIFMEDIA: 1044 case SIOCGIFMEDIA: 1045 /* Flow control requires full-duplex mode. */ 1046 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1047 (ifr->ifr_media & IFM_FDX) == 0) 1048 ifr->ifr_media &= ~IFM_ETH_FMASK; 1049 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1050 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1051 /* We can do both TXPAUSE and RXPAUSE. 
				 */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error != ENETRESET)
			break;

		/* post-process */
		error = 0;
		switch (command) {
		case SIOCSIFCAP:
			error = (*ifp->if_init)(ifp);
			break;
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (ifp->if_flags & IFF_RUNNING)
				enet_setmulti(sc);
			break;
		}
		break;
	}

	splx(s);

	return error;
}

/*
 * for MII
 */
static int
enet_miibus_readreg(device_t dev, int phy, int reg)
{
	struct enet_softc *sc;
	int timeout;
	uint32_t val, status;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* read command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		status = ENET_REG_READ(sc, ENET_EIR);
		if (status & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n",
		    reg);
		val = -1;
	} else {
		val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK;
	}

	return val;
}

static void
enet_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct enet_softc *sc;
	int timeout;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* write command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) |
	    (ENET_MMFR_DATAMASK & val));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n",
		    reg);
	}
}

static void
enet_miibus_statchg(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	uint32_t ecr, ecr0;
	uint32_t rcr, rcr0;
	uint32_t tcr, tcr0;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/* get current status */
	ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET;
	rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR);
	tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR);

	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
		tcr |= ENET_TCR_FDEN;	/* full duplex */
		rcr &= ~ENET_RCR_DRT;	/* enable receive on transmit */
	} else {
		tcr &= ~ENET_TCR_FDEN;	/* half duplex */
		rcr |= ENET_RCR_DRT;	/* disable receive on transmit */
	}

	if ((tcr ^ tcr0) & ENET_TCR_FDEN) {
		/*
		 * need to reset because
		 * FDEN can change when ECR[ETHEREN] is 0
		 */
		enet_init_regs(sc, 0);
		return;
	}

	switch
(IFM_SUBTYPE(ife->ifm_media)) { 1192 case IFM_AUTO: 1193 case IFM_1000_T: 1194 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1195 rcr &= ~ENET_RCR_RMII_10T; 1196 break; 1197 case IFM_100_TX: 1198 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1199 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1200 break; 1201 case IFM_10_T: 1202 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1203 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1204 break; 1205 default: 1206 ecr = ecr0; 1207 rcr = rcr0; 1208 tcr = tcr0; 1209 break; 1210 } 1211 1212 if (sc->sc_rgmii == 0) 1213 ecr &= ~ENET_ECR_SPEED; 1214 1215 if (sc->sc_flowflags & IFM_FLOW) 1216 rcr |= ENET_RCR_FCE; 1217 else 1218 rcr &= ~ENET_RCR_FCE; 1219 1220 /* update registers if need change */ 1221 if (ecr != ecr0) 1222 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1223 if (rcr != rcr0) 1224 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1225 if (tcr != tcr0) 1226 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1227 } 1228 1229 /* 1230 * handling descriptors 1231 */ 1232 static void 1233 enet_init_txring(struct enet_softc *sc) 1234 { 1235 int i; 1236 1237 /* build TX ring */ 1238 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1239 sc->sc_txdesc_ring[i].tx_flags1_len = 1240 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1241 sc->sc_txdesc_ring[i].tx_databuf = 0; 1242 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1243 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1244 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1245 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1246 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1247 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1248 1249 TXDESC_WRITEOUT(i); 1250 } 1251 1252 sc->sc_tx_free = ENET_TX_RING_CNT; 1253 sc->sc_tx_considx = 0; 1254 sc->sc_tx_prodidx = 0; 1255 } 1256 1257 static int 1258 enet_init_rxring(struct enet_softc *sc) 1259 { 1260 int i, error; 1261 1262 /* build RX ring */ 1263 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1264 error = enet_alloc_rxbuf(sc, i); 1265 if (error != 0) 1266 return error; 1267 } 1268 1269 sc->sc_rx_readidx = 0; 1270 1271 return 0; 1272 } 1273 1274 static int 1275 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1276 { 1277 struct mbuf *m; 1278 int error; 1279 1280 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1281 1282 /* free mbuf if already allocated */ 1283 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1284 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1285 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1286 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1287 } 1288 1289 /* allocate new mbuf cluster */ 1290 MGETHDR(m, M_DONTWAIT, MT_DATA); 1291 if (m == NULL) 1292 return ENOBUFS; 1293 MCLGET(m, M_DONTWAIT); 1294 if (!(m->m_flags & M_EXT)) { 1295 m_freem(m); 1296 return ENOBUFS; 1297 } 1298 m->m_len = MCLBYTES; 1299 m->m_next = NULL; 1300 1301 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1302 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1303 BUS_DMA_READ | BUS_DMA_NOWAIT); 1304 if (error) { 1305 m_freem(m); 1306 return error; 1307 } 1308 1309 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1310 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1312 1313 sc->sc_rxsoft[idx].rxs_mbuf = m; 1314 enet_reset_rxdesc(sc, idx); 1315 return 0; 1316 } 1317 1318 static void 1319 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1320 { 1321 uint32_t paddr; 1322 1323 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1324 1325 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1326 RXFLAGS1_E | 1327 ((idx == (ENET_RX_RING_CNT - 1)) ? 
RXFLAGS1_W : 0); 1328 sc->sc_rxdesc_ring[idx].rx_databuf = paddr; 1329 sc->sc_rxdesc_ring[idx].rx_flags2 = 1330 RXFLAGS2_INT; 1331 sc->sc_rxdesc_ring[idx].rx_hl = 0; 1332 sc->sc_rxdesc_ring[idx].rx_proto = 0; 1333 sc->sc_rxdesc_ring[idx].rx_cksum = 0; 1334 sc->sc_rxdesc_ring[idx].rx_flags3 = 0; 1335 sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0; 1336 sc->sc_rxdesc_ring[idx].rx__reserved2 = 0; 1337 sc->sc_rxdesc_ring[idx].rx__reserved3 = 0; 1338 1339 RXDESC_WRITEOUT(idx); 1340 } 1341 1342 static void 1343 enet_drain_txbuf(struct enet_softc *sc) 1344 { 1345 int idx; 1346 struct enet_txsoft *txs; 1347 struct ifnet *ifp; 1348 1349 ifp = &sc->sc_ethercom.ec_if; 1350 1351 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 1352 idx = ENET_TX_NEXTIDX(idx)) { 1353 1354 /* txsoft[] is used only first segment */ 1355 txs = &sc->sc_txsoft[idx]; 1356 TXDESC_READIN(idx); 1357 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 1358 sc->sc_txdesc_ring[idx].tx_flags1_len = 0; 1359 bus_dmamap_unload(sc->sc_dmat, 1360 txs->txs_dmamap); 1361 m_freem(txs->txs_mbuf); 1362 1363 ifp->if_oerrors++; 1364 } 1365 sc->sc_tx_free++; 1366 } 1367 } 1368 1369 static void 1370 enet_drain_rxbuf(struct enet_softc *sc) 1371 { 1372 int i; 1373 1374 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1375 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) { 1376 sc->sc_rxdesc_ring[i].rx_flags1_len = 0; 1377 bus_dmamap_unload(sc->sc_dmat, 1378 sc->sc_rxsoft[i].rxs_dmamap); 1379 m_freem(sc->sc_rxsoft[i].rxs_mbuf); 1380 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1381 } 1382 } 1383 } 1384 1385 static int 1386 enet_alloc_ring(struct enet_softc *sc) 1387 { 1388 int i, error; 1389 1390 /* 1391 * build DMA maps for TX. 1392 * TX descriptor must be able to contain mbuf chains, 1393 * so, make up ENET_MAX_PKT_NSEGS dmamap. 1394 */ 1395 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1396 error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN, 1397 ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT, 1398 &sc->sc_txsoft[i].txs_dmamap); 1399 1400 if (error) { 1401 aprint_error_dev(sc->sc_dev, 1402 "can't create DMA map for TX descs\n"); 1403 goto fail_1; 1404 } 1405 } 1406 1407 /* 1408 * build DMA maps for RX. 1409 * RX descripter contains An mbuf cluster, 1410 * and make up a dmamap. 
1411 */ 1412 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1413 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1414 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1415 &sc->sc_rxsoft[i].rxs_dmamap); 1416 if (error) { 1417 aprint_error_dev(sc->sc_dev, 1418 "can't create DMA map for RX descs\n"); 1419 goto fail_2; 1420 } 1421 } 1422 1423 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1424 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1425 return -1; 1426 memset(sc->sc_txdesc_ring, 0, 1427 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1428 1429 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1430 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1431 return -1; 1432 memset(sc->sc_rxdesc_ring, 0, 1433 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1434 1435 return 0; 1436 1437 fail_2: 1438 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1439 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1440 bus_dmamap_destroy(sc->sc_dmat, 1441 sc->sc_rxsoft[i].rxs_dmamap); 1442 } 1443 fail_1: 1444 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1445 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1446 bus_dmamap_destroy(sc->sc_dmat, 1447 sc->sc_txsoft[i].txs_dmamap); 1448 } 1449 return error; 1450 } 1451 1452 static int 1453 enet_encap_mbufalign(struct mbuf **mp) 1454 { 1455 struct mbuf *m, *m0, *mt, *p, *x; 1456 void *ap; 1457 uint32_t alignoff, chiplen; 1458 1459 /* 1460 * iMX6 SoC ethernet controller requires 1461 * address of buffer must aligned 8, and 1462 * length of buffer must be greater than 10 (first fragment only?) 1463 */ 1464 #define ALIGNBYTE 8 1465 #define MINBUFSIZE 10 1466 #define ALIGN_PTR(p, align) \ 1467 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1468 1469 m0 = *mp; 1470 mt = p = NULL; 1471 for (m = m0; m != NULL; m = m->m_next) { 1472 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1473 if (m->m_len < (ALIGNBYTE * 2)) { 1474 /* 1475 * rearrange mbuf data aligned 1476 * 1477 * align 8 * * * * * 1478 * +0123456789abcdef0123456789abcdef0 1479 * FROM m->m_data[___________abcdefghijklmn_______] 1480 * 1481 * +0123456789abcdef0123456789abcdef0 1482 * TO m->m_data[________abcdefghijklm___________] or 1483 * m->m_data[________________abcdefghijklmn__] 1484 */ 1485 if ((alignoff != 0) && (m->m_len != 0)) { 1486 chiplen = ALIGNBYTE - alignoff; 1487 if (M_LEADINGSPACE(m) >= alignoff) { 1488 ap = m->m_data - alignoff; 1489 memmove(ap, m->m_data, m->m_len); 1490 m->m_data = ap; 1491 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1492 ap = m->m_data + chiplen; 1493 memmove(ap, m->m_data, m->m_len); 1494 m->m_data = ap; 1495 } else { 1496 /* 1497 * no space to align data. (M_READONLY?) 1498 * allocate new mbuf aligned, 1499 * and copy to it. 
1500 */ 1501 MGET(x, M_DONTWAIT, m->m_type); 1502 if (x == NULL) { 1503 m_freem(m); 1504 return ENOBUFS; 1505 } 1506 MCLAIM(x, m->m_owner); 1507 if (m->m_flags & M_PKTHDR) 1508 M_MOVE_PKTHDR(x, m); 1509 x->m_len = m->m_len; 1510 x->m_data = ALIGN_PTR(x->m_data, 1511 ALIGNBYTE); 1512 memcpy(mtod(x, void *), mtod(m, void *), 1513 m->m_len); 1514 p->m_next = x; 1515 x->m_next = m_free(m); 1516 m = x; 1517 } 1518 } 1519 1520 /* 1521 * fill 1st mbuf at least 10byte 1522 * 1523 * align 8 * * * * * 1524 * +0123456789abcdef0123456789abcdef0 1525 * FROM m->m_data[________abcde___________________] 1526 * m->m_data[__fg____________________________] 1527 * m->m_data[_________________hi_____________] 1528 * m->m_data[__________jk____________________] 1529 * m->m_data[____l___________________________] 1530 * 1531 * +0123456789abcdef0123456789abcdef0 1532 * TO m->m_data[________abcdefghij______________] 1533 * m->m_data[________________________________] 1534 * m->m_data[________________________________] 1535 * m->m_data[___________k____________________] 1536 * m->m_data[____l___________________________] 1537 */ 1538 if (mt == NULL) { 1539 mt = m; 1540 while (mt->m_len == 0) { 1541 mt = mt->m_next; 1542 if (mt == NULL) { 1543 m_freem(m); 1544 return ENOBUFS; 1545 } 1546 } 1547 1548 /* mt = 1st mbuf, x = 2nd mbuf */ 1549 x = mt->m_next; 1550 while (mt->m_len < MINBUFSIZE) { 1551 if (x == NULL) { 1552 m_freem(m); 1553 return ENOBUFS; 1554 } 1555 1556 alignoff = (uintptr_t)x->m_data & 1557 (ALIGNBYTE - 1); 1558 chiplen = ALIGNBYTE - alignoff; 1559 if (chiplen > x->m_len) { 1560 chiplen = x->m_len; 1561 } else if ((mt->m_len + chiplen) < 1562 MINBUFSIZE) { 1563 /* 1564 * next mbuf should be greater 1565 * than ALIGNBYTE? 1566 */ 1567 if (x->m_len >= (chiplen + 1568 ALIGNBYTE * 2)) 1569 chiplen += ALIGNBYTE; 1570 else 1571 chiplen = x->m_len; 1572 } 1573 1574 if (chiplen && 1575 (M_TRAILINGSPACE(mt) < chiplen)) { 1576 /* 1577 * move data to the begining of 1578 * m_dat[] (aligned) to en- 1579 * large trailingspace 1580 */ 1581 if (mt->m_flags & M_EXT) { 1582 ap = mt->m_ext.ext_buf; 1583 } else if (mt->m_flags & 1584 M_PKTHDR) { 1585 ap = mt->m_pktdat; 1586 } else { 1587 ap = mt->m_dat; 1588 } 1589 ap = ALIGN_PTR(ap, ALIGNBYTE); 1590 memcpy(ap, mt->m_data, 1591 mt->m_len); 1592 mt->m_data = ap; 1593 } 1594 1595 if (chiplen && 1596 (M_TRAILINGSPACE(mt) >= chiplen)) { 1597 memcpy(mt->m_data + mt->m_len, 1598 x->m_data, chiplen); 1599 mt->m_len += chiplen; 1600 m_adj(x, chiplen); 1601 } 1602 1603 x = x->m_next; 1604 } 1605 } 1606 1607 } else { 1608 mt = m; 1609 1610 /* 1611 * allocate new mbuf x, and rearrange as below; 1612 * 1613 * align 8 * * * * * 1614 * +0123456789abcdef0123456789abcdef0 1615 * FROM m->m_data[____________abcdefghijklmnopq___] 1616 * 1617 * +0123456789abcdef0123456789abcdef0 1618 * TO x->m_data[________abcdefghijkl____________] 1619 * m->m_data[________________________mnopq___] 1620 * 1621 */ 1622 if (alignoff != 0) { 1623 /* at least ALIGNBYTE */ 1624 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1625 1626 MGET(x, M_DONTWAIT, m->m_type); 1627 if (x == NULL) { 1628 m_freem(m); 1629 return ENOBUFS; 1630 } 1631 MCLAIM(x, m->m_owner); 1632 if (m->m_flags & M_PKTHDR) 1633 M_MOVE_PKTHDR(x, m); 1634 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1635 memcpy(mtod(x, void *), mtod(m, void *), 1636 chiplen); 1637 x->m_len = chiplen; 1638 x->m_next = m; 1639 m_adj(m, chiplen); 1640 1641 if (p == NULL) 1642 m0 = x; 1643 else 1644 p->m_next = x; 1645 } 1646 } 1647 p = m; 1648 } 1649 *mp = m0; 1650 1651 return 
0; 1652 } 1653 1654 static int 1655 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1656 { 1657 bus_dmamap_t map; 1658 struct mbuf *m; 1659 int csumflags, idx, i, error; 1660 uint32_t flags1, flags2; 1661 1662 idx = sc->sc_tx_prodidx; 1663 map = sc->sc_txsoft[idx].txs_dmamap; 1664 1665 /* align mbuf data for claim of ENET */ 1666 error = enet_encap_mbufalign(mp); 1667 if (error != 0) 1668 return error; 1669 1670 m = *mp; 1671 csumflags = m->m_pkthdr.csum_flags; 1672 1673 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1674 BUS_DMA_NOWAIT); 1675 if (error != 0) { 1676 device_printf(sc->sc_dev, 1677 "Error mapping mbuf into TX chain: error=%d\n", error); 1678 m_freem(m); 1679 return error; 1680 } 1681 1682 if (map->dm_nsegs > sc->sc_tx_free) { 1683 bus_dmamap_unload(sc->sc_dmat, map); 1684 device_printf(sc->sc_dev, 1685 "too many mbuf chain %d\n", map->dm_nsegs); 1686 m_freem(m); 1687 return ENOBUFS; 1688 } 1689 1690 /* fill protocol cksum zero beforehand */ 1691 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1692 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1693 int ehlen; 1694 uint16_t etype; 1695 1696 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1697 switch (ntohs(etype)) { 1698 case ETHERTYPE_IP: 1699 case ETHERTYPE_IPV6: 1700 ehlen = ETHER_HDR_LEN; 1701 break; 1702 case ETHERTYPE_VLAN: 1703 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1704 break; 1705 default: 1706 ehlen = 0; 1707 break; 1708 } 1709 1710 if (ehlen) { 1711 const int off = 1712 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1713 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data); 1714 if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) { 1715 uint16_t zero = 0; 1716 m_copyback(m, ehlen + off, sizeof(zero), &zero); 1717 } 1718 } 1719 } 1720 1721 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1722 BUS_DMASYNC_PREWRITE); 1723 1724 for (i = 0; i < map->dm_nsegs; i++) { 1725 flags1 = TXFLAGS1_R; 1726 flags2 = 0; 1727 1728 if (i == 0) { 1729 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1730 sc->sc_txsoft[idx].txs_mbuf = m; 1731 } 1732 1733 /* checksum offloading */ 1734 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1735 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1736 flags2 |= TXFLAGS2_PINS; 1737 if (csumflags & (M_CSUM_IPv4)) 1738 flags2 |= TXFLAGS2_IINS; 1739 1740 if (i == map->dm_nsegs - 1) { 1741 /* mark last segment */ 1742 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1743 flags2 |= TXFLAGS2_INT; 1744 } 1745 if (idx == ENET_TX_RING_CNT - 1) { 1746 /* mark end of ring */ 1747 flags1 |= TXFLAGS1_W; 1748 } 1749 1750 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1751 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1752 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1753 TXDESC_WRITEOUT(idx); 1754 1755 sc->sc_txdesc_ring[idx].tx_flags1_len = 1756 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1757 TXDESC_WRITEOUT(idx); 1758 1759 idx = ENET_TX_NEXTIDX(idx); 1760 sc->sc_tx_free--; 1761 } 1762 1763 sc->sc_tx_prodidx = idx; 1764 1765 return 0; 1766 } 1767 1768 /* 1769 * device initialize 1770 */ 1771 static int 1772 enet_init_regs(struct enet_softc *sc, int init) 1773 { 1774 struct mii_data *mii; 1775 struct ifmedia_entry *ife; 1776 paddr_t paddr; 1777 uint32_t val; 1778 int miimode, fulldup, ecr_speed, rcr_speed, flowctrl; 1779 1780 if (init) { 1781 fulldup = 1; 1782 ecr_speed = ENET_ECR_SPEED; 1783 rcr_speed = 0; 1784 flowctrl = 0; 1785 } else { 1786 mii = &sc->sc_mii; 1787 ife = mii->mii_media.ifm_cur; 1788 1789 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) 1790 fulldup = 1; 1791 else 1792 fulldup = 0; 1793 1794 switch 
 (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			ecr_speed = 0;
			rcr_speed = ENET_RCR_RMII_10T;
			break;
		case IFM_100_TX:
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	if (sc->sc_rgmii == 0)
		ecr_speed = 0;

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupt */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable MIB register */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/* MII speed setup. MDCclk(=2.5MHz) = ENET_PLL/((val+1)*2) */
	val = ((sc->sc_pllclock) / 500000 - 1) / 10;
	ENET_REG_WRITE(sc, ENET_MSCR, val << 1);

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksum is default off (override in TX descriptor) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align ethernet payload on 32bit, discard frames with MAC layer error,
	 * and don't discard checksum error
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */

	if (sc->sc_rgmii == 0)
		miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE;
	else
		miimode = ENET_RCR_RGMII_EN;
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* RX frame padding remove */
	    miimode |
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
	    rcr_speed |
	    (fulldup ? 0 : ENET_RCR_DRT) |
	    ENET_RCR_MAX_FL(val));

	/* Maximum Receive BufSize per one descriptor */
	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);

	/* TX/RX Descriptor Physical Address */
	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
	/* sync cache */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* enable interrupts */
	val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR;
	if (sc->sc_imxtype == 7)
		val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 |
		    ENET_EIR_TXF1 | ENET_EIR_RXF1;
	ENET_REG_WRITE(sc, ENET_EIMR, val);

	/* enable ether */
	ENET_REG_WRITE(sc, ENET_ECR,
#if _BYTE_ORDER == _LITTLE_ENDIAN
	    ENET_ECR_DBSWP |
#endif
	    ecr_speed |
	    ENET_ECR_EN1588 |		/* use enhanced TX/RX descriptor */
	    ENET_ECR_ETHEREN);		/* Ethernet Enable */

	return 0;
}

static int
enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp,
    bus_dmamap_t *mapp)
{
	bus_dma_segment_t seglist[1];
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist,
	    1, &nsegs, M_NOWAIT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate DMA buffer, error=%d\n", error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to map DMA buffer, error=%d\n",
		    error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create DMA map, error=%d\n", error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load DMA map, error=%d\n", error);
		goto fail_load;
	}

	return 0;

fail_load:
	bus_dmamap_destroy(sc->sc_dmat, *mapp);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, *addrp, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, seglist, 1);
fail_alloc:
	return error;
}