1 /* $NetBSD: if_enet.c,v 1.23 2019/05/28 07:41:46 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET) 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.23 2019/05/28 07:41:46 msaitoh Exp $"); 35 36 #include "vlan.h" 37 38 #include <sys/param.h> 39 #include <sys/bus.h> 40 #include <sys/mbuf.h> 41 #include <sys/device.h> 42 #include <sys/sockio.h> 43 #include <sys/kernel.h> 44 #include <sys/rndsource.h> 45 46 #include <lib/libkern/libkern.h> 47 48 #include <net/if.h> 49 #include <net/if_dl.h> 50 #include <net/if_media.h> 51 #include <net/if_ether.h> 52 #include <net/bpf.h> 53 #include <net/if_vlanvar.h> 54 55 #include <netinet/in.h> 56 #include <netinet/in_systm.h> 57 #include <netinet/ip.h> 58 59 #include <dev/mii/mii.h> 60 #include <dev/mii/miivar.h> 61 62 #include <arm/imx/if_enetreg.h> 63 #include <arm/imx/if_enetvar.h> 64 65 #undef DEBUG_ENET 66 #undef ENET_EVENT_COUNTER 67 68 #define ENET_TICK hz 69 70 #ifdef DEBUG_ENET 71 int enet_debug = 0; 72 # define DEVICE_DPRINTF(args...) \ 73 do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0) 74 #else 75 # define DEVICE_DPRINTF(args...) 76 #endif 77 78 79 #define RXDESC_MAXBUFSIZE 0x07f0 80 /* ENET does not work greather than 0x0800... */ 81 82 #undef ENET_SUPPORT_JUMBO /* JUMBO FRAME SUPPORT is unstable */ 83 #ifdef ENET_SUPPORT_JUMBO 84 # define ENET_MAX_PKT_LEN 4034 /* MAX FIFO LEN */ 85 #else 86 # define ENET_MAX_PKT_LEN 1522 87 #endif 88 #define ENET_DEFAULT_PKT_LEN 1522 /* including VLAN tag */ 89 #define MTU2FRAMESIZE(n) \ 90 ((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) 91 92 93 #define ENET_MAX_PKT_NSEGS 64 94 95 #define ENET_TX_NEXTIDX(idx) \ 96 (((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1)) 97 #define ENET_RX_NEXTIDX(idx) \ 98 (((idx) >= (ENET_RX_RING_CNT - 1)) ? 
0 : ((idx) + 1)) 99 100 #define TXDESC_WRITEOUT(idx) \ 101 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 sizeof(struct enet_txdesc) * (idx), \ 103 sizeof(struct enet_txdesc), \ 104 BUS_DMASYNC_PREWRITE) 105 106 #define TXDESC_READIN(idx) \ 107 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 sizeof(struct enet_txdesc) * (idx), \ 109 sizeof(struct enet_txdesc), \ 110 BUS_DMASYNC_PREREAD) 111 112 #define RXDESC_WRITEOUT(idx) \ 113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 sizeof(struct enet_rxdesc) * (idx), \ 115 sizeof(struct enet_rxdesc), \ 116 BUS_DMASYNC_PREWRITE) 117 118 #define RXDESC_READIN(idx) \ 119 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 sizeof(struct enet_rxdesc) * (idx), \ 121 sizeof(struct enet_rxdesc), \ 122 BUS_DMASYNC_PREREAD) 123 124 #define ENET_REG_READ(sc, reg) \ 125 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 127 #define ENET_REG_WRITE(sc, reg, value) \ 128 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 130 #ifdef ENET_EVENT_COUNTER 131 static void enet_attach_evcnt(struct enet_softc *); 132 static void enet_update_evcnt(struct enet_softc *); 133 #endif 134 135 static int enet_intr(void *); 136 static void enet_tick(void *); 137 static int enet_tx_intr(void *); 138 static int enet_rx_intr(void *); 139 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 140 int); 141 142 static void enet_start(struct ifnet *); 143 static int enet_ifflags_cb(struct ethercom *); 144 static int enet_ioctl(struct ifnet *, u_long, void *); 145 static int enet_init(struct ifnet *); 146 static void enet_stop(struct ifnet *, int); 147 static void enet_watchdog(struct ifnet *); 148 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 149 150 static int enet_miibus_readreg(device_t, int, int, uint16_t *); 151 static int enet_miibus_writereg(device_t, int, int, uint16_t); 152 static void enet_miibus_statchg(struct ifnet *); 153 154 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 155 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 156 static void enet_setmulti(struct enet_softc *); 157 static int enet_encap_mbufalign(struct mbuf **); 158 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 159 static int enet_init_regs(struct enet_softc *, int); 160 static int enet_alloc_ring(struct enet_softc *); 161 static void enet_init_txring(struct enet_softc *); 162 static int enet_init_rxring(struct enet_softc *); 163 static void enet_reset_rxdesc(struct enet_softc *, int); 164 static int enet_alloc_rxbuf(struct enet_softc *, int); 165 static void enet_drain_txbuf(struct enet_softc *); 166 static void enet_drain_rxbuf(struct enet_softc *); 167 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 168 bus_dmamap_t *); 169 170 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 171 enet_match, enet_attach, NULL, NULL); 172 173 void 174 enet_attach_common(device_t self, bus_space_tag_t iot, 175 bus_dma_tag_t dmat, bus_addr_t addr, bus_size_t size, int irq) 176 { 177 struct enet_softc *sc = device_private(self); 178 struct ifnet *ifp; 179 struct mii_data * const mii = &sc->sc_mii; 180 181 sc->sc_dev = self; 182 sc->sc_iot = iot; 183 sc->sc_addr = addr; 184 sc->sc_dmat = dmat; 185 186 aprint_naive("\n"); 187 aprint_normal(": Gigabit Ethernet Controller\n"); 188 if (bus_space_map(sc->sc_iot, sc->sc_addr, size, 0, 189 &sc->sc_ioh)) { 190 aprint_error_dev(self, "cannot map registers\n"); 191 return; 192 } 193 194 /* allocate dma buffer */ 195 if 
(enet_alloc_ring(sc)) 196 return; 197 198 #define IS_ENADDR_ZERO(enaddr) \ 199 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 200 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 201 202 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 203 /* by any chance, mac-address is already set by bootloader? */ 204 enet_gethwaddr(sc, sc->sc_enaddr); 205 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 206 /* give up. set randomly */ 207 uint32_t eaddr = random(); 208 /* not multicast */ 209 sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc; 210 sc->sc_enaddr[1] = eaddr >> 16; 211 sc->sc_enaddr[2] = eaddr >> 8; 212 sc->sc_enaddr[3] = eaddr; 213 eaddr = random(); 214 sc->sc_enaddr[4] = eaddr >> 8; 215 sc->sc_enaddr[5] = eaddr; 216 217 aprint_error_dev(self, 218 "cannot get mac address. set randomly\n"); 219 } 220 } 221 enet_sethwaddr(sc, sc->sc_enaddr); 222 223 aprint_normal_dev(self, "Ethernet address %s\n", 224 ether_sprintf(sc->sc_enaddr)); 225 226 enet_init_regs(sc, 1); 227 228 /* setup interrupt handlers */ 229 if ((sc->sc_ih = intr_establish(irq, IPL_NET, 230 IST_LEVEL, enet_intr, sc)) == NULL) { 231 aprint_error_dev(self, "unable to establish interrupt\n"); 232 goto failure; 233 } 234 235 if (sc->sc_imxtype == 7) { 236 /* i.MX7 use 3 interrupts */ 237 if ((sc->sc_ih2 = intr_establish(irq + 1, IPL_NET, 238 IST_LEVEL, enet_intr, sc)) == NULL) { 239 aprint_error_dev(self, 240 "unable to establish 2nd interrupt\n"); 241 intr_disestablish(sc->sc_ih); 242 goto failure; 243 } 244 if ((sc->sc_ih3 = intr_establish(irq + 2, IPL_NET, 245 IST_LEVEL, enet_intr, sc)) == NULL) { 246 aprint_error_dev(self, 247 "unable to establish 3rd interrupt\n"); 248 intr_disestablish(sc->sc_ih2); 249 intr_disestablish(sc->sc_ih); 250 goto failure; 251 } 252 } 253 254 /* callout will be scheduled from enet_init() */ 255 callout_init(&sc->sc_tick_ch, 0); 256 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 257 258 /* setup ifp */ 259 ifp = &sc->sc_ethercom.ec_if; 260 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 261 ifp->if_softc = sc; 262 ifp->if_mtu = ETHERMTU; 263 ifp->if_baudrate = IF_Gbps(1); 264 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 265 ifp->if_ioctl = enet_ioctl; 266 ifp->if_start = enet_start; 267 ifp->if_init = enet_init; 268 ifp->if_stop = enet_stop; 269 ifp->if_watchdog = enet_watchdog; 270 271 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 272 #ifdef ENET_SUPPORT_JUMBO 273 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 274 #endif 275 276 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 277 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 278 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 279 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 280 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 281 282 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN)); 283 IFQ_SET_READY(&ifp->if_snd); 284 285 /* setup MII */ 286 sc->sc_ethercom.ec_mii = mii; 287 mii->mii_ifp = ifp; 288 mii->mii_readreg = enet_miibus_readreg; 289 mii->mii_writereg = enet_miibus_writereg; 290 mii->mii_statchg = enet_miibus_statchg; 291 ifmedia_init(&mii->mii_media, 0, ether_mediachange, enet_mediastatus); 292 293 /* try to attach PHY */ 294 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); 295 if (LIST_FIRST(&mii->mii_phys) == NULL) { 296 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 297 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 298 } else { 299 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 300 } 301 302 if_attach(ifp); 303 ether_ifattach(ifp, sc->sc_enaddr); 304 
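	/*
	 * Added note (assumption, descriptive only): enet_ifflags_cb() below
	 * lets ether_ioctl() resync the promiscuous/multicast filters when
	 * if_flags change, without forcing a full re-init of the interface.
	 */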
ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 305 306 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 307 RND_TYPE_NET, RND_FLAG_DEFAULT); 308 309 #ifdef ENET_EVENT_COUNTER 310 enet_attach_evcnt(sc); 311 #endif 312 313 sc->sc_stopping = false; 314 315 return; 316 317 failure: 318 bus_space_unmap(sc->sc_iot, sc->sc_ioh, size); 319 return; 320 } 321 322 #ifdef ENET_EVENT_COUNTER 323 static void 324 enet_attach_evcnt(struct enet_softc *sc) 325 { 326 const char *xname; 327 328 xname = device_xname(sc->sc_dev); 329 330 #define ENET_EVCNT_ATTACH(name) \ 331 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 332 NULL, xname, #name); 333 334 ENET_EVCNT_ATTACH(t_drop); 335 ENET_EVCNT_ATTACH(t_packets); 336 ENET_EVCNT_ATTACH(t_bc_pkt); 337 ENET_EVCNT_ATTACH(t_mc_pkt); 338 ENET_EVCNT_ATTACH(t_crc_align); 339 ENET_EVCNT_ATTACH(t_undersize); 340 ENET_EVCNT_ATTACH(t_oversize); 341 ENET_EVCNT_ATTACH(t_frag); 342 ENET_EVCNT_ATTACH(t_jab); 343 ENET_EVCNT_ATTACH(t_col); 344 ENET_EVCNT_ATTACH(t_p64); 345 ENET_EVCNT_ATTACH(t_p65to127n); 346 ENET_EVCNT_ATTACH(t_p128to255n); 347 ENET_EVCNT_ATTACH(t_p256to511); 348 ENET_EVCNT_ATTACH(t_p512to1023); 349 ENET_EVCNT_ATTACH(t_p1024to2047); 350 ENET_EVCNT_ATTACH(t_p_gte2048); 351 ENET_EVCNT_ATTACH(t_octets); 352 ENET_EVCNT_ATTACH(r_packets); 353 ENET_EVCNT_ATTACH(r_bc_pkt); 354 ENET_EVCNT_ATTACH(r_mc_pkt); 355 ENET_EVCNT_ATTACH(r_crc_align); 356 ENET_EVCNT_ATTACH(r_undersize); 357 ENET_EVCNT_ATTACH(r_oversize); 358 ENET_EVCNT_ATTACH(r_frag); 359 ENET_EVCNT_ATTACH(r_jab); 360 ENET_EVCNT_ATTACH(r_p64); 361 ENET_EVCNT_ATTACH(r_p65to127); 362 ENET_EVCNT_ATTACH(r_p128to255); 363 ENET_EVCNT_ATTACH(r_p256to511); 364 ENET_EVCNT_ATTACH(r_p512to1023); 365 ENET_EVCNT_ATTACH(r_p1024to2047); 366 ENET_EVCNT_ATTACH(r_p_gte2048); 367 ENET_EVCNT_ATTACH(r_octets); 368 } 369 370 static void 371 enet_update_evcnt(struct enet_softc *sc) 372 { 373 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 374 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 375 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 376 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 377 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 378 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 379 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 380 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 381 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 382 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 383 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 384 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 385 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 386 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 387 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 388 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 389 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 390 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 391 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 392 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 393 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 394 sc->sc_ev_r_crc_align.ev_count += 
ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 395 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 396 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 397 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 398 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 399 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 400 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 401 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 402 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 403 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 404 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 405 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 406 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 407 } 408 #endif /* ENET_EVENT_COUNTER */ 409 410 static void 411 enet_tick(void *arg) 412 { 413 struct enet_softc *sc; 414 struct mii_data *mii; 415 struct ifnet *ifp; 416 int s; 417 418 sc = arg; 419 mii = &sc->sc_mii; 420 ifp = &sc->sc_ethercom.ec_if; 421 422 s = splnet(); 423 424 if (sc->sc_stopping) 425 goto out; 426 427 #ifdef ENET_EVENT_COUNTER 428 enet_update_evcnt(sc); 429 #endif 430 431 /* update counters */ 432 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 433 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 434 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 435 436 /* clear counters */ 437 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 438 ENET_REG_WRITE(sc, ENET_MIBC, 0); 439 440 mii_tick(mii); 441 out: 442 443 if (!sc->sc_stopping) 444 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 445 446 splx(s); 447 } 448 449 static int 450 enet_intr(void *arg) 451 { 452 struct enet_softc *sc; 453 struct ifnet *ifp; 454 uint32_t status; 455 456 sc = arg; 457 status = ENET_REG_READ(sc, ENET_EIR); 458 459 if (sc->sc_imxtype == 7) { 460 if (status & (ENET_EIR_TXF | ENET_EIR_TXF1 | ENET_EIR_TXF2)) 461 enet_tx_intr(arg); 462 if (status & (ENET_EIR_RXF | ENET_EIR_RXF1 | ENET_EIR_RXF2)) 463 enet_rx_intr(arg); 464 } else { 465 if (status & ENET_EIR_TXF) 466 enet_tx_intr(arg); 467 if (status & ENET_EIR_RXF) 468 enet_rx_intr(arg); 469 } 470 471 if (status & ENET_EIR_EBERR) { 472 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 473 ifp = &sc->sc_ethercom.ec_if; 474 enet_stop(ifp, 1); 475 enet_init(ifp); 476 } else { 477 ENET_REG_WRITE(sc, ENET_EIR, status); 478 } 479 480 rnd_add_uint32(&sc->sc_rnd_source, status); 481 482 return 1; 483 } 484 485 static int 486 enet_tx_intr(void *arg) 487 { 488 struct enet_softc *sc; 489 struct ifnet *ifp; 490 struct enet_txsoft *txs; 491 int idx; 492 493 sc = (struct enet_softc *)arg; 494 ifp = &sc->sc_ethercom.ec_if; 495 496 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 497 idx = ENET_TX_NEXTIDX(idx)) { 498 499 txs = &sc->sc_txsoft[idx]; 500 501 TXDESC_READIN(idx); 502 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 503 /* This TX Descriptor has not been transmitted yet */ 504 break; 505 } 506 507 /* txsoft is available on first segment (TXFLAGS1_T1) */ 508 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 509 bus_dmamap_unload(sc->sc_dmat, 510 txs->txs_dmamap); 511 m_freem(txs->txs_mbuf); 512 ifp->if_opackets++; 513 } 514 515 /* checking error */ 516 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 517 uint32_t flags2; 518 519 flags2 = 
sc->sc_txdesc_ring[idx].tx_flags2; 520 521 if (flags2 & (TXFLAGS2_TXE | 522 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 523 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 524 #ifdef DEBUG_ENET 525 if (enet_debug) { 526 char flagsbuf[128]; 527 528 snprintb(flagsbuf, sizeof(flagsbuf), 529 "\20" "\20TRANSMIT" "\16UNDERFLOW" 530 "\15COLLISION" "\14FRAME" 531 "\13LATECOLLISION" "\12OVERFLOW", 532 flags2); 533 534 device_printf(sc->sc_dev, 535 "txdesc[%d]: transmit error: " 536 "flags2=%s\n", idx, flagsbuf); 537 } 538 #endif /* DEBUG_ENET */ 539 ifp->if_oerrors++; 540 } 541 } 542 543 sc->sc_tx_free++; 544 } 545 sc->sc_tx_considx = idx; 546 547 if (sc->sc_tx_free > 0) 548 ifp->if_flags &= ~IFF_OACTIVE; 549 550 /* 551 * No more pending TX descriptor, 552 * cancel the watchdog timer. 553 */ 554 if (sc->sc_tx_free == ENET_TX_RING_CNT) 555 ifp->if_timer = 0; 556 557 return 1; 558 } 559 560 static int 561 enet_rx_intr(void *arg) 562 { 563 struct enet_softc *sc; 564 struct ifnet *ifp; 565 struct enet_rxsoft *rxs; 566 int idx, len, amount; 567 uint32_t flags1, flags2; 568 struct mbuf *m, *m0, *mprev; 569 570 sc = arg; 571 ifp = &sc->sc_ethercom.ec_if; 572 573 m0 = mprev = NULL; 574 amount = 0; 575 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 576 577 rxs = &sc->sc_rxsoft[idx]; 578 579 RXDESC_READIN(idx); 580 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 581 /* This RX Descriptor has not been received yet */ 582 break; 583 } 584 585 /* 586 * build mbuf from RX Descriptor if needed 587 */ 588 m = rxs->rxs_mbuf; 589 rxs->rxs_mbuf = NULL; 590 591 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 592 len = RXFLAGS1_LEN(flags1); 593 594 #define RACC_SHIFT16 2 595 if (m0 == NULL) { 596 m0 = m; 597 m_adj(m0, RACC_SHIFT16); 598 len -= RACC_SHIFT16; 599 m->m_len = len; 600 amount = len; 601 } else { 602 if (flags1 & RXFLAGS1_L) 603 len = len - amount - RACC_SHIFT16; 604 605 m->m_len = len; 606 amount += len; 607 if (m->m_flags & M_PKTHDR) 608 m_remove_pkthdr(m); 609 mprev->m_next = m; 610 } 611 mprev = m; 612 613 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 614 615 if (flags1 & RXFLAGS1_L) { 616 /* last buffer */ 617 if ((amount < ETHER_HDR_LEN) || 618 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 619 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 620 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 621 RXFLAGS2_CE)))) { 622 623 #ifdef DEBUG_ENET 624 if (enet_debug) { 625 char flags1buf[128], flags2buf[128]; 626 snprintb(flags1buf, sizeof(flags1buf), 627 "\20" "\31MISS" "\26LENGTHVIOLATION" 628 "\25NONOCTET" "\23CRC" "\22OVERRUN" 629 "\21TRUNCATED", flags1); 630 snprintb(flags2buf, sizeof(flags2buf), 631 "\20" "\40MAC" "\33PHY" 632 "\32COLLISION", flags2); 633 634 DEVICE_DPRINTF( 635 "rxdesc[%d]: receive error: " 636 "flags1=%s,flags2=%s,len=%d\n", 637 idx, flags1buf, flags2buf, amount); 638 } 639 #endif /* DEBUG_ENET */ 640 ifp->if_ierrors++; 641 m_freem(m0); 642 643 } else { 644 /* packet receive ok */ 645 m_set_rcvif(m0, ifp); 646 m0->m_pkthdr.len = amount; 647 648 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 649 rxs->rxs_dmamap->dm_mapsize, 650 BUS_DMASYNC_PREREAD); 651 652 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 653 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 654 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 655 enet_rx_csum(sc, ifp, m0, idx); 656 657 if_percpuq_enqueue(ifp->if_percpuq, m0); 658 } 659 660 m0 = NULL; 661 mprev = NULL; 662 amount = 0; 663 664 } else { 665 /* continued from previous buffer */ 666 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 667 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 668 } 669 670 
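		/*
		 * Added note (assumption, descriptive only): the received mbuf
		 * chain (or the errored one) has already been handed off above,
		 * so recycle this descriptor slot: unload the old DMA map and
		 * attach a fresh cluster via enet_alloc_rxbuf(), which also
		 * rearms the descriptor through enet_reset_rxdesc().
		 */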
bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
		if (enet_alloc_rxbuf(sc, idx) != 0) {
			panic("enet_alloc_rxbuf NULL\n");
		}
	}
	sc->sc_rx_readidx = idx;

	/* re-enable RX DMA to make sure */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	return 1;
}

static void
enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx)
{
	uint32_t flags2;
	uint8_t proto;

	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

	if (flags2 & RXFLAGS2_IPV6) {
		proto = sc->sc_rxdesc_ring[idx].rx_proto;

		/* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */
		if ((proto == IPPROTO_TCP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		else if ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is this an IPv4 packet? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid only when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid only when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0] = paddr >> 24;
	hwaddr[1] =
paddr >> 16; 810 hwaddr[2] = paddr >> 8; 811 hwaddr[3] = paddr; 812 813 paddr = ENET_REG_READ(sc, ENET_PAUR); 814 hwaddr[4] = paddr >> 24; 815 hwaddr[5] = paddr >> 16; 816 } 817 818 static void 819 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 820 { 821 uint32_t paddr; 822 823 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 824 hwaddr[3]; 825 ENET_REG_WRITE(sc, ENET_PALR, paddr); 826 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 827 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 828 } 829 830 /* 831 * ifnet interfaces 832 */ 833 static int 834 enet_init(struct ifnet *ifp) 835 { 836 struct enet_softc *sc; 837 int s, error; 838 839 sc = ifp->if_softc; 840 841 s = splnet(); 842 843 enet_init_regs(sc, 0); 844 enet_init_txring(sc); 845 error = enet_init_rxring(sc); 846 if (error != 0) { 847 enet_drain_rxbuf(sc); 848 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 849 goto init_failure; 850 } 851 852 /* reload mac address */ 853 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 854 enet_sethwaddr(sc, sc->sc_enaddr); 855 856 /* program multicast address */ 857 enet_setmulti(sc); 858 859 /* update if_flags */ 860 ifp->if_flags |= IFF_RUNNING; 861 ifp->if_flags &= ~IFF_OACTIVE; 862 863 /* update local copy of if_flags */ 864 sc->sc_if_flags = ifp->if_flags; 865 866 /* mii */ 867 mii_mediachg(&sc->sc_mii); 868 869 /* enable RX DMA */ 870 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 871 872 sc->sc_stopping = false; 873 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 874 875 init_failure: 876 splx(s); 877 878 return error; 879 } 880 881 static void 882 enet_start(struct ifnet *ifp) 883 { 884 struct enet_softc *sc; 885 struct mbuf *m; 886 int npkt; 887 888 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 889 return; 890 891 sc = ifp->if_softc; 892 for (npkt = 0; ; npkt++) { 893 IFQ_POLL(&ifp->if_snd, m); 894 if (m == NULL) 895 break; 896 897 if (sc->sc_tx_free <= 0) { 898 /* no tx descriptor now... */ 899 ifp->if_flags |= IFF_OACTIVE; 900 DEVICE_DPRINTF("TX descriptor is full\n"); 901 break; 902 } 903 904 IFQ_DEQUEUE(&ifp->if_snd, m); 905 906 if (enet_encap_txring(sc, &m) != 0) { 907 /* too many mbuf chains? */ 908 ifp->if_flags |= IFF_OACTIVE; 909 DEVICE_DPRINTF( 910 "TX descriptor is full. dropping packet\n"); 911 m_freem(m); 912 ifp->if_oerrors++; 913 break; 914 } 915 916 /* Pass the packet to any BPF listeners */ 917 bpf_mtap(ifp, m, BPF_D_OUT); 918 } 919 920 if (npkt) { 921 /* enable TX DMA */ 922 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 923 924 ifp->if_timer = 5; 925 } 926 } 927 928 static void 929 enet_stop(struct ifnet *ifp, int disable) 930 { 931 struct enet_softc *sc; 932 int s; 933 uint32_t v; 934 935 sc = ifp->if_softc; 936 937 s = splnet(); 938 939 sc->sc_stopping = true; 940 callout_stop(&sc->sc_tick_ch); 941 942 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 943 v = ENET_REG_READ(sc, ENET_ECR); 944 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 945 946 /* Mark the interface as down and cancel the watchdog timer. 
*/ 947 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 948 ifp->if_timer = 0; 949 950 if (disable) { 951 enet_drain_txbuf(sc); 952 enet_drain_rxbuf(sc); 953 } 954 955 splx(s); 956 } 957 958 static void 959 enet_watchdog(struct ifnet *ifp) 960 { 961 struct enet_softc *sc; 962 int s; 963 964 sc = ifp->if_softc; 965 s = splnet(); 966 967 device_printf(sc->sc_dev, "watchdog timeout\n"); 968 ifp->if_oerrors++; 969 970 /* salvage packets left in descriptors */ 971 enet_tx_intr(sc); 972 enet_rx_intr(sc); 973 974 /* reset */ 975 enet_stop(ifp, 1); 976 enet_init(ifp); 977 978 splx(s); 979 } 980 981 static void 982 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 983 { 984 struct enet_softc *sc = ifp->if_softc; 985 986 ether_mediastatus(ifp, ifmr); 987 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 988 | sc->sc_flowflags; 989 } 990 991 static int 992 enet_ifflags_cb(struct ethercom *ec) 993 { 994 struct ifnet *ifp = &ec->ec_if; 995 struct enet_softc *sc = ifp->if_softc; 996 int change = ifp->if_flags ^ sc->sc_if_flags; 997 998 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 999 return ENETRESET; 1000 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 1001 return 0; 1002 1003 enet_setmulti(sc); 1004 1005 sc->sc_if_flags = ifp->if_flags; 1006 return 0; 1007 } 1008 1009 static int 1010 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 1011 { 1012 struct enet_softc *sc; 1013 struct ifreq *ifr; 1014 int s, error; 1015 uint32_t v; 1016 1017 sc = ifp->if_softc; 1018 ifr = data; 1019 1020 error = 0; 1021 1022 s = splnet(); 1023 1024 switch (command) { 1025 case SIOCSIFMTU: 1026 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 1027 error = EINVAL; 1028 } else { 1029 ifp->if_mtu = ifr->ifr_mtu; 1030 1031 /* set maximum frame length */ 1032 v = MTU2FRAMESIZE(ifr->ifr_mtu); 1033 ENET_REG_WRITE(sc, ENET_FTRL, v); 1034 v = ENET_REG_READ(sc, ENET_RCR); 1035 v &= ~ENET_RCR_MAX_FL(0x3fff); 1036 v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 1037 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1038 ENET_REG_WRITE(sc, ENET_RCR, v); 1039 } 1040 break; 1041 case SIOCSIFMEDIA: 1042 /* Flow control requires full-duplex mode. */ 1043 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1044 (ifr->ifr_media & IFM_FDX) == 0) 1045 ifr->ifr_media &= ~IFM_ETH_FMASK; 1046 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1047 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1048 /* We can do both TXPAUSE and RXPAUSE. 
*/ 1049 ifr->ifr_media |= 1050 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1051 } 1052 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1053 } 1054 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1055 break; 1056 default: 1057 error = ether_ioctl(ifp, command, data); 1058 if (error != ENETRESET) 1059 break; 1060 1061 /* post-process */ 1062 error = 0; 1063 switch (command) { 1064 case SIOCSIFCAP: 1065 error = (*ifp->if_init)(ifp); 1066 break; 1067 case SIOCADDMULTI: 1068 case SIOCDELMULTI: 1069 if (ifp->if_flags & IFF_RUNNING) 1070 enet_setmulti(sc); 1071 break; 1072 } 1073 break; 1074 } 1075 1076 splx(s); 1077 1078 return error; 1079 } 1080 1081 /* 1082 * for MII 1083 */ 1084 static int 1085 enet_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1086 { 1087 struct enet_softc *sc; 1088 int timeout; 1089 uint32_t status; 1090 1091 sc = device_private(dev); 1092 1093 /* clear MII update */ 1094 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1095 1096 /* read command */ 1097 ENET_REG_WRITE(sc, ENET_MMFR, 1098 ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA | 1099 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy)); 1100 1101 /* check MII update */ 1102 for (timeout = 5000; timeout > 0; --timeout) { 1103 status = ENET_REG_READ(sc, ENET_EIR); 1104 if (status & ENET_EIR_MII) 1105 break; 1106 } 1107 if (timeout <= 0) { 1108 DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n", 1109 reg); 1110 return ETIMEDOUT; 1111 } else 1112 *val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK; 1113 1114 return 0; 1115 } 1116 1117 static int 1118 enet_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1119 { 1120 struct enet_softc *sc; 1121 int timeout; 1122 1123 sc = device_private(dev); 1124 1125 /* clear MII update */ 1126 ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1127 1128 /* write command */ 1129 ENET_REG_WRITE(sc, ENET_MMFR, 1130 ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA | 1131 ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) | 1132 (ENET_MMFR_DATAMASK & val)); 1133 1134 /* check MII update */ 1135 for (timeout = 5000; timeout > 0; --timeout) { 1136 if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII) 1137 break; 1138 } 1139 if (timeout <= 0) { 1140 DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", reg); 1141 return ETIMEDOUT; 1142 } 1143 1144 return 0; 1145 } 1146 1147 static void 1148 enet_miibus_statchg(struct ifnet *ifp) 1149 { 1150 struct enet_softc *sc; 1151 struct mii_data *mii; 1152 struct ifmedia_entry *ife; 1153 uint32_t ecr, ecr0; 1154 uint32_t rcr, rcr0; 1155 uint32_t tcr, tcr0; 1156 1157 sc = ifp->if_softc; 1158 mii = &sc->sc_mii; 1159 ife = mii->mii_media.ifm_cur; 1160 1161 /* get current status */ 1162 ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET; 1163 rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR); 1164 tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR); 1165 1166 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1167 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 1168 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1169 mii->mii_media_active &= ~IFM_ETH_FMASK; 1170 } 1171 1172 if ((ife->ifm_media & IFM_FDX) != 0) { 1173 tcr |= ENET_TCR_FDEN; /* full duplex */ 1174 rcr &= ~ENET_RCR_DRT;; /* enable receive on transmit */ 1175 } else { 1176 tcr &= ~ENET_TCR_FDEN; /* half duplex */ 1177 rcr |= ENET_RCR_DRT; /* disable receive on transmit */ 1178 } 1179 1180 if ((tcr ^ tcr0) & ENET_TCR_FDEN) { 1181 /* 1182 * need to reset because 1183 * FDEN can change when ECR[ETHEREN] is 0 1184 */ 1185 enet_init_regs(sc, 0); 1186 return; 
1187 } 1188 1189 switch (IFM_SUBTYPE(ife->ifm_media)) { 1190 case IFM_AUTO: 1191 case IFM_1000_T: 1192 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1193 rcr &= ~ENET_RCR_RMII_10T; 1194 break; 1195 case IFM_100_TX: 1196 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1197 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1198 break; 1199 case IFM_10_T: 1200 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1201 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1202 break; 1203 default: 1204 ecr = ecr0; 1205 rcr = rcr0; 1206 tcr = tcr0; 1207 break; 1208 } 1209 1210 if (sc->sc_rgmii == 0) 1211 ecr &= ~ENET_ECR_SPEED; 1212 1213 if (sc->sc_flowflags & IFM_FLOW) 1214 rcr |= ENET_RCR_FCE; 1215 else 1216 rcr &= ~ENET_RCR_FCE; 1217 1218 /* update registers if need change */ 1219 if (ecr != ecr0) 1220 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1221 if (rcr != rcr0) 1222 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1223 if (tcr != tcr0) 1224 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1225 } 1226 1227 /* 1228 * handling descriptors 1229 */ 1230 static void 1231 enet_init_txring(struct enet_softc *sc) 1232 { 1233 int i; 1234 1235 /* build TX ring */ 1236 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1237 sc->sc_txdesc_ring[i].tx_flags1_len = 1238 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1239 sc->sc_txdesc_ring[i].tx_databuf = 0; 1240 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1241 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1242 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1243 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1244 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1245 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1246 1247 TXDESC_WRITEOUT(i); 1248 } 1249 1250 sc->sc_tx_free = ENET_TX_RING_CNT; 1251 sc->sc_tx_considx = 0; 1252 sc->sc_tx_prodidx = 0; 1253 } 1254 1255 static int 1256 enet_init_rxring(struct enet_softc *sc) 1257 { 1258 int i, error; 1259 1260 /* build RX ring */ 1261 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1262 error = enet_alloc_rxbuf(sc, i); 1263 if (error != 0) 1264 return error; 1265 } 1266 1267 sc->sc_rx_readidx = 0; 1268 1269 return 0; 1270 } 1271 1272 static int 1273 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1274 { 1275 struct mbuf *m; 1276 int error; 1277 1278 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1279 1280 /* free mbuf if already allocated */ 1281 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1282 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1283 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1284 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1285 } 1286 1287 /* allocate new mbuf cluster */ 1288 MGETHDR(m, M_DONTWAIT, MT_DATA); 1289 if (m == NULL) 1290 return ENOBUFS; 1291 MCLGET(m, M_DONTWAIT); 1292 if (!(m->m_flags & M_EXT)) { 1293 m_freem(m); 1294 return ENOBUFS; 1295 } 1296 m->m_len = MCLBYTES; 1297 m->m_next = NULL; 1298 1299 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1300 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1301 BUS_DMA_READ | BUS_DMA_NOWAIT); 1302 if (error) { 1303 m_freem(m); 1304 return error; 1305 } 1306 1307 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1308 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1309 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1310 1311 sc->sc_rxsoft[idx].rxs_mbuf = m; 1312 enet_reset_rxdesc(sc, idx); 1313 return 0; 1314 } 1315 1316 static void 1317 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1318 { 1319 uint32_t paddr; 1320 1321 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1322 1323 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1324 RXFLAGS1_E | 1325 ((idx == (ENET_RX_RING_CNT - 1)) ? 
RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only for the first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			ifp->if_oerrors++;
		}
		sc->sc_tx_free++;
	}
}

static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * A TX descriptor chain must be able to hold a whole mbuf chain,
	 * so create each dmamap with ENET_MAX_PKT_NSEGS segments.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * Each RX descriptor holds a single mbuf cluster,
	 * so one segment per dmamap is enough.
1409 */ 1410 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1411 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1412 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1413 &sc->sc_rxsoft[i].rxs_dmamap); 1414 if (error) { 1415 aprint_error_dev(sc->sc_dev, 1416 "can't create DMA map for RX descs\n"); 1417 goto fail_2; 1418 } 1419 } 1420 1421 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1422 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1423 return -1; 1424 memset(sc->sc_txdesc_ring, 0, 1425 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1426 1427 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1428 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1429 return -1; 1430 memset(sc->sc_rxdesc_ring, 0, 1431 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1432 1433 return 0; 1434 1435 fail_2: 1436 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1437 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1438 bus_dmamap_destroy(sc->sc_dmat, 1439 sc->sc_rxsoft[i].rxs_dmamap); 1440 } 1441 fail_1: 1442 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1443 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1444 bus_dmamap_destroy(sc->sc_dmat, 1445 sc->sc_txsoft[i].txs_dmamap); 1446 } 1447 return error; 1448 } 1449 1450 static int 1451 enet_encap_mbufalign(struct mbuf **mp) 1452 { 1453 struct mbuf *m, *m0, *mt, *p, *x; 1454 void *ap; 1455 uint32_t alignoff, chiplen; 1456 1457 /* 1458 * iMX6 SoC ethernet controller requires 1459 * address of buffer must aligned 8, and 1460 * length of buffer must be greater than 10 (first fragment only?) 1461 */ 1462 #define ALIGNBYTE 8 1463 #define MINBUFSIZE 10 1464 #define ALIGN_PTR(p, align) \ 1465 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1466 1467 m0 = *mp; 1468 mt = p = NULL; 1469 for (m = m0; m != NULL; m = m->m_next) { 1470 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1471 if (m->m_len < (ALIGNBYTE * 2)) { 1472 /* 1473 * rearrange mbuf data aligned 1474 * 1475 * align 8 * * * * * 1476 * +0123456789abcdef0123456789abcdef0 1477 * FROM m->m_data[___________abcdefghijklmn_______] 1478 * 1479 * +0123456789abcdef0123456789abcdef0 1480 * TO m->m_data[________abcdefghijklm___________] or 1481 * m->m_data[________________abcdefghijklmn__] 1482 */ 1483 if ((alignoff != 0) && (m->m_len != 0)) { 1484 chiplen = ALIGNBYTE - alignoff; 1485 if (M_LEADINGSPACE(m) >= alignoff) { 1486 ap = m->m_data - alignoff; 1487 memmove(ap, m->m_data, m->m_len); 1488 m->m_data = ap; 1489 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1490 ap = m->m_data + chiplen; 1491 memmove(ap, m->m_data, m->m_len); 1492 m->m_data = ap; 1493 } else { 1494 /* 1495 * no space to align data. (M_READONLY?) 1496 * allocate new mbuf aligned, 1497 * and copy to it. 
1498 */ 1499 MGET(x, M_DONTWAIT, m->m_type); 1500 if (x == NULL) { 1501 m_freem(m); 1502 return ENOBUFS; 1503 } 1504 MCLAIM(x, m->m_owner); 1505 if (m->m_flags & M_PKTHDR) 1506 m_move_pkthdr(x, m); 1507 x->m_len = m->m_len; 1508 x->m_data = ALIGN_PTR(x->m_data, 1509 ALIGNBYTE); 1510 memcpy(mtod(x, void *), mtod(m, void *), 1511 m->m_len); 1512 p->m_next = x; 1513 x->m_next = m_free(m); 1514 m = x; 1515 } 1516 } 1517 1518 /* 1519 * fill 1st mbuf at least 10byte 1520 * 1521 * align 8 * * * * * 1522 * +0123456789abcdef0123456789abcdef0 1523 * FROM m->m_data[________abcde___________________] 1524 * m->m_data[__fg____________________________] 1525 * m->m_data[_________________hi_____________] 1526 * m->m_data[__________jk____________________] 1527 * m->m_data[____l___________________________] 1528 * 1529 * +0123456789abcdef0123456789abcdef0 1530 * TO m->m_data[________abcdefghij______________] 1531 * m->m_data[________________________________] 1532 * m->m_data[________________________________] 1533 * m->m_data[___________k____________________] 1534 * m->m_data[____l___________________________] 1535 */ 1536 if (mt == NULL) { 1537 mt = m; 1538 while (mt->m_len == 0) { 1539 mt = mt->m_next; 1540 if (mt == NULL) { 1541 m_freem(m); 1542 return ENOBUFS; 1543 } 1544 } 1545 1546 /* mt = 1st mbuf, x = 2nd mbuf */ 1547 x = mt->m_next; 1548 while (mt->m_len < MINBUFSIZE) { 1549 if (x == NULL) { 1550 m_freem(m); 1551 return ENOBUFS; 1552 } 1553 1554 alignoff = (uintptr_t)x->m_data & 1555 (ALIGNBYTE - 1); 1556 chiplen = ALIGNBYTE - alignoff; 1557 if (chiplen > x->m_len) { 1558 chiplen = x->m_len; 1559 } else if ((mt->m_len + chiplen) < 1560 MINBUFSIZE) { 1561 /* 1562 * next mbuf should be greater 1563 * than ALIGNBYTE? 1564 */ 1565 if (x->m_len >= (chiplen + 1566 ALIGNBYTE * 2)) 1567 chiplen += ALIGNBYTE; 1568 else 1569 chiplen = x->m_len; 1570 } 1571 1572 if (chiplen && 1573 (M_TRAILINGSPACE(mt) < chiplen)) { 1574 /* 1575 * move data to the begining of 1576 * m_dat[] (aligned) to en- 1577 * large trailingspace 1578 */ 1579 if (mt->m_flags & M_EXT) { 1580 ap = mt->m_ext.ext_buf; 1581 } else if (mt->m_flags & 1582 M_PKTHDR) { 1583 ap = mt->m_pktdat; 1584 } else { 1585 ap = mt->m_dat; 1586 } 1587 ap = ALIGN_PTR(ap, ALIGNBYTE); 1588 memcpy(ap, mt->m_data, 1589 mt->m_len); 1590 mt->m_data = ap; 1591 } 1592 1593 if (chiplen && 1594 (M_TRAILINGSPACE(mt) >= chiplen)) { 1595 memcpy(mt->m_data + mt->m_len, 1596 x->m_data, chiplen); 1597 mt->m_len += chiplen; 1598 m_adj(x, chiplen); 1599 } 1600 1601 x = x->m_next; 1602 } 1603 } 1604 1605 } else { 1606 mt = m; 1607 1608 /* 1609 * allocate new mbuf x, and rearrange as below; 1610 * 1611 * align 8 * * * * * 1612 * +0123456789abcdef0123456789abcdef0 1613 * FROM m->m_data[____________abcdefghijklmnopq___] 1614 * 1615 * +0123456789abcdef0123456789abcdef0 1616 * TO x->m_data[________abcdefghijkl____________] 1617 * m->m_data[________________________mnopq___] 1618 * 1619 */ 1620 if (alignoff != 0) { 1621 /* at least ALIGNBYTE */ 1622 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1623 1624 MGET(x, M_DONTWAIT, m->m_type); 1625 if (x == NULL) { 1626 m_freem(m); 1627 return ENOBUFS; 1628 } 1629 MCLAIM(x, m->m_owner); 1630 if (m->m_flags & M_PKTHDR) 1631 m_move_pkthdr(x, m); 1632 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1633 memcpy(mtod(x, void *), mtod(m, void *), 1634 chiplen); 1635 x->m_len = chiplen; 1636 x->m_next = m; 1637 m_adj(m, chiplen); 1638 1639 if (p == NULL) 1640 m0 = x; 1641 else 1642 p->m_next = x; 1643 } 1644 } 1645 p = m; 1646 } 1647 *mp = m0; 1648 1649 return 
0; 1650 } 1651 1652 static int 1653 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1654 { 1655 bus_dmamap_t map; 1656 struct mbuf *m; 1657 int csumflags, idx, i, error; 1658 uint32_t flags1, flags2; 1659 1660 idx = sc->sc_tx_prodidx; 1661 map = sc->sc_txsoft[idx].txs_dmamap; 1662 1663 /* align mbuf data for claim of ENET */ 1664 error = enet_encap_mbufalign(mp); 1665 if (error != 0) 1666 return error; 1667 1668 m = *mp; 1669 csumflags = m->m_pkthdr.csum_flags; 1670 1671 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1672 BUS_DMA_NOWAIT); 1673 if (error != 0) { 1674 device_printf(sc->sc_dev, 1675 "Error mapping mbuf into TX chain: error=%d\n", error); 1676 m_freem(m); 1677 return error; 1678 } 1679 1680 if (map->dm_nsegs > sc->sc_tx_free) { 1681 bus_dmamap_unload(sc->sc_dmat, map); 1682 device_printf(sc->sc_dev, 1683 "too many mbuf chain %d\n", map->dm_nsegs); 1684 m_freem(m); 1685 return ENOBUFS; 1686 } 1687 1688 /* fill protocol cksum zero beforehand */ 1689 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1690 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1691 int ehlen; 1692 uint16_t etype; 1693 1694 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1695 switch (ntohs(etype)) { 1696 case ETHERTYPE_IP: 1697 case ETHERTYPE_IPV6: 1698 ehlen = ETHER_HDR_LEN; 1699 break; 1700 case ETHERTYPE_VLAN: 1701 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1702 break; 1703 default: 1704 ehlen = 0; 1705 break; 1706 } 1707 1708 if (ehlen) { 1709 const int off = 1710 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1711 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data); 1712 if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) { 1713 uint16_t zero = 0; 1714 m_copyback(m, ehlen + off, sizeof(zero), &zero); 1715 } 1716 } 1717 } 1718 1719 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1720 BUS_DMASYNC_PREWRITE); 1721 1722 for (i = 0; i < map->dm_nsegs; i++) { 1723 flags1 = TXFLAGS1_R; 1724 flags2 = 0; 1725 1726 if (i == 0) { 1727 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1728 sc->sc_txsoft[idx].txs_mbuf = m; 1729 } 1730 1731 /* checksum offloading */ 1732 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1733 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1734 flags2 |= TXFLAGS2_PINS; 1735 if (csumflags & (M_CSUM_IPv4)) 1736 flags2 |= TXFLAGS2_IINS; 1737 1738 if (i == map->dm_nsegs - 1) { 1739 /* mark last segment */ 1740 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1741 flags2 |= TXFLAGS2_INT; 1742 } 1743 if (idx == ENET_TX_RING_CNT - 1) { 1744 /* mark end of ring */ 1745 flags1 |= TXFLAGS1_W; 1746 } 1747 1748 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1749 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1750 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1751 TXDESC_WRITEOUT(idx); 1752 1753 sc->sc_txdesc_ring[idx].tx_flags1_len = 1754 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1755 TXDESC_WRITEOUT(idx); 1756 1757 idx = ENET_TX_NEXTIDX(idx); 1758 sc->sc_tx_free--; 1759 } 1760 1761 sc->sc_tx_prodidx = idx; 1762 1763 return 0; 1764 } 1765 1766 /* 1767 * device initialize 1768 */ 1769 static int 1770 enet_init_regs(struct enet_softc *sc, int init) 1771 { 1772 struct mii_data *mii; 1773 struct ifmedia_entry *ife; 1774 paddr_t paddr; 1775 uint32_t val; 1776 int miimode, fulldup, ecr_speed, rcr_speed, flowctrl; 1777 1778 if (init) { 1779 fulldup = 1; 1780 ecr_speed = ENET_ECR_SPEED; 1781 rcr_speed = 0; 1782 flowctrl = 0; 1783 } else { 1784 mii = &sc->sc_mii; 1785 ife = mii->mii_media.ifm_cur; 1786 1787 if ((ife->ifm_media & IFM_FDX) != 0) 1788 fulldup = 1; 1789 else 1790 fulldup = 0; 1791 1792 switch 
(IFM_SUBTYPE(ife->ifm_media)) { 1793 case IFM_10_T: 1794 ecr_speed = 0; 1795 rcr_speed = ENET_RCR_RMII_10T; 1796 break; 1797 case IFM_100_TX: 1798 ecr_speed = 0; 1799 rcr_speed = 0; 1800 break; 1801 default: 1802 ecr_speed = ENET_ECR_SPEED; 1803 rcr_speed = 0; 1804 break; 1805 } 1806 1807 flowctrl = sc->sc_flowflags & IFM_FLOW; 1808 } 1809 1810 if (sc->sc_rgmii == 0) 1811 ecr_speed = 0; 1812 1813 /* reset */ 1814 ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET); 1815 1816 /* mask and clear all interrupt */ 1817 ENET_REG_WRITE(sc, ENET_EIMR, 0); 1818 ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff); 1819 1820 /* full duplex */ 1821 ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0); 1822 1823 /* clear and enable MIB register */ 1824 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 1825 ENET_REG_WRITE(sc, ENET_MIBC, 0); 1826 1827 /* MII speed setup. MDCclk(=2.5MHz) = ENET_PLL/((val+1)*2) */ 1828 val = ((sc->sc_pllclock) / 500000 - 1) / 10; 1829 ENET_REG_WRITE(sc, ENET_MSCR, val << 1); 1830 1831 /* Opcode/Pause Duration */ 1832 ENET_REG_WRITE(sc, ENET_OPD, 0x00010020); 1833 1834 /* Receive FIFO */ 1835 ENET_REG_WRITE(sc, ENET_RSFL, 16); /* RxFIFO Section Full */ 1836 ENET_REG_WRITE(sc, ENET_RSEM, 0x84); /* RxFIFO Section Empty */ 1837 ENET_REG_WRITE(sc, ENET_RAEM, 8); /* RxFIFO Almost Empty */ 1838 ENET_REG_WRITE(sc, ENET_RAFL, 8); /* RxFIFO Almost Full */ 1839 1840 /* Transmit FIFO */ 1841 ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD | 1842 ENET_TFWR_FIFO(128)); /* TxFIFO Watermark */ 1843 ENET_REG_WRITE(sc, ENET_TSEM, 0); /* TxFIFO Section Empty */ 1844 ENET_REG_WRITE(sc, ENET_TAEM, 256); /* TxFIFO Almost Empty */ 1845 ENET_REG_WRITE(sc, ENET_TAFL, 8); /* TxFIFO Almost Full */ 1846 ENET_REG_WRITE(sc, ENET_TIPG, 12); /* Tx Inter-Packet Gap */ 1847 1848 /* hardware checksum is default off (override in TX descripter) */ 1849 ENET_REG_WRITE(sc, ENET_TACC, 0); 1850 1851 /* 1852 * align ethernet payload on 32bit, discard frames with MAC layer error, 1853 * and don't discard checksum error 1854 */ 1855 ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS); 1856 1857 /* maximum frame size */ 1858 val = ENET_DEFAULT_PKT_LEN; 1859 ENET_REG_WRITE(sc, ENET_FTRL, val); /* Frame Truncation Length */ 1860 1861 if (sc->sc_rgmii == 0) 1862 miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE; 1863 else 1864 miimode = ENET_RCR_RGMII_EN; 1865 ENET_REG_WRITE(sc, ENET_RCR, 1866 ENET_RCR_PADEN | /* RX frame padding remove */ 1867 miimode | 1868 (flowctrl ? ENET_RCR_FCE : 0) | /* flow control enable */ 1869 rcr_speed | 1870 (fulldup ? 
0 : ENET_RCR_DRT) | 1871 ENET_RCR_MAX_FL(val)); 1872 1873 /* Maximum Receive BufSize per one descriptor */ 1874 ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE); 1875 1876 1877 /* TX/RX Descriptor Physical Address */ 1878 paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr; 1879 ENET_REG_WRITE(sc, ENET_TDSR, paddr); 1880 paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr; 1881 ENET_REG_WRITE(sc, ENET_RDSR, paddr); 1882 /* sync cache */ 1883 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0, 1884 sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1885 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0, 1886 sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1887 1888 /* enable interrupts */ 1889 val = ENET_EIMR | ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR; 1890 if (sc->sc_imxtype == 7) 1891 val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 | ENET_EIR_TXF1 | 1892 ENET_EIR_RXF1; 1893 ENET_REG_WRITE(sc, ENET_EIMR, val); 1894 1895 /* enable ether */ 1896 ENET_REG_WRITE(sc, ENET_ECR, 1897 #if _BYTE_ORDER == _LITTLE_ENDIAN 1898 ENET_ECR_DBSWP | 1899 #endif 1900 ecr_speed | 1901 ENET_ECR_EN1588 | /* use enhanced TX/RX descriptor */ 1902 ENET_ECR_ETHEREN); /* Ethernet Enable */ 1903 1904 return 0; 1905 } 1906 1907 static int 1908 enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp, 1909 bus_dmamap_t *mapp) 1910 { 1911 bus_dma_segment_t seglist[1]; 1912 int nsegs, error; 1913 1914 if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist, 1915 1, &nsegs, M_NOWAIT)) != 0) { 1916 device_printf(sc->sc_dev, 1917 "unable to allocate DMA buffer, error=%d\n", error); 1918 goto fail_alloc; 1919 } 1920 1921 if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp, 1922 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 1923 device_printf(sc->sc_dev, 1924 "unable to map DMA buffer, error=%d\n", 1925 error); 1926 goto fail_map; 1927 } 1928 1929 if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1930 BUS_DMA_NOWAIT, mapp)) != 0) { 1931 device_printf(sc->sc_dev, 1932 "unable to create DMA map, error=%d\n", error); 1933 goto fail_create; 1934 } 1935 1936 if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL, 1937 BUS_DMA_NOWAIT)) != 0) { 1938 aprint_error_dev(sc->sc_dev, 1939 "unable to load DMA map, error=%d\n", error); 1940 goto fail_load; 1941 } 1942 1943 return 0; 1944 1945 fail_load: 1946 bus_dmamap_destroy(sc->sc_dmat, *mapp); 1947 fail_create: 1948 bus_dmamem_unmap(sc->sc_dmat, *addrp, size); 1949 fail_map: 1950 bus_dmamem_free(sc->sc_dmat, seglist, 1); 1951 fail_alloc: 1952 return error; 1953 } 1954