/*	$NetBSD: if_enet.c,v 1.8 2016/06/15 07:26:11 ryo Exp $	*/

/*
 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.8 2016/06/15 07:26:11 ryo Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/rndsource.h>

#include <lib/libkern/libkern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/imx/if_enetreg.h>
#include <arm/imx/if_enetvar.h>

#undef DEBUG_ENET
#undef ENET_EVENT_COUNTER

#define ENET_TICK	hz

#ifdef DEBUG_ENET
int enet_debug = 0;
# define DEVICE_DPRINTF(args...)	\
	do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0)
#else
# define DEVICE_DPRINTF(args...)
#endif


#define RXDESC_MAXBUFSIZE	0x07f0
		/* ENET does not work with sizes greater than 0x0800... */

#undef ENET_SUPPORT_JUMBO	/* JUMBO FRAME SUPPORT is unstable */
#ifdef ENET_SUPPORT_JUMBO
# define ENET_MAX_PKT_LEN	4034	/* MAX FIFO LEN */
#else
# define ENET_MAX_PKT_LEN	1522
#endif
#define ENET_DEFAULT_PKT_LEN	1522	/* including VLAN tag */
#define MTU2FRAMESIZE(n)	\
	((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN)


#define ENET_MAX_PKT_NSEGS	64

#define ENET_TX_NEXTIDX(idx)	(((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1))
#define ENET_RX_NEXTIDX(idx)	(((idx) >= (ENET_RX_RING_CNT - 1)) ? \
0 : ((idx) + 1)) 97 98 #define TXDESC_WRITEOUT(idx) \ 99 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 100 sizeof(struct enet_txdesc) * (idx), \ 101 sizeof(struct enet_txdesc), \ 102 BUS_DMASYNC_PREWRITE) 103 104 #define TXDESC_READIN(idx) \ 105 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 106 sizeof(struct enet_txdesc) * (idx), \ 107 sizeof(struct enet_txdesc), \ 108 BUS_DMASYNC_PREREAD) 109 110 #define RXDESC_WRITEOUT(idx) \ 111 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 112 sizeof(struct enet_rxdesc) * (idx), \ 113 sizeof(struct enet_rxdesc), \ 114 BUS_DMASYNC_PREWRITE) 115 116 #define RXDESC_READIN(idx) \ 117 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 118 sizeof(struct enet_rxdesc) * (idx), \ 119 sizeof(struct enet_rxdesc), \ 120 BUS_DMASYNC_PREREAD) 121 122 #define ENET_REG_READ(sc, reg) \ 123 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 124 125 #define ENET_REG_WRITE(sc, reg, value) \ 126 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 127 128 #ifdef ENET_EVENT_COUNTER 129 static void enet_attach_evcnt(struct enet_softc *); 130 static void enet_update_evcnt(struct enet_softc *); 131 #endif 132 133 static int enet_intr(void *); 134 static void enet_tick(void *); 135 static int enet_tx_intr(void *); 136 static int enet_rx_intr(void *); 137 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 138 int); 139 140 static void enet_start(struct ifnet *); 141 static int enet_ifflags_cb(struct ethercom *); 142 static int enet_ioctl(struct ifnet *, u_long, void *); 143 static int enet_init(struct ifnet *); 144 static void enet_stop(struct ifnet *, int); 145 static void enet_watchdog(struct ifnet *); 146 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 147 148 static int enet_miibus_readreg(device_t, int, int); 149 static void enet_miibus_writereg(device_t, int, int, int); 150 static void enet_miibus_statchg(struct ifnet *); 151 152 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 153 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 154 static void enet_setmulti(struct enet_softc *); 155 static int enet_encap_mbufalign(struct mbuf **); 156 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 157 static int enet_init_regs(struct enet_softc *, int); 158 static int enet_alloc_ring(struct enet_softc *); 159 static void enet_init_txring(struct enet_softc *); 160 static int enet_init_rxring(struct enet_softc *); 161 static void enet_reset_rxdesc(struct enet_softc *, int); 162 static int enet_alloc_rxbuf(struct enet_softc *, int); 163 static void enet_drain_txbuf(struct enet_softc *); 164 static void enet_drain_rxbuf(struct enet_softc *); 165 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 166 bus_dmamap_t *); 167 168 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 169 enet_match, enet_attach, NULL, NULL); 170 171 void 172 enet_attach_common(device_t self, bus_space_tag_t iot, 173 bus_dma_tag_t dmat, bus_addr_t addr, bus_size_t size, int irq) 174 { 175 struct enet_softc *sc; 176 struct ifnet *ifp; 177 178 sc = device_private(self); 179 sc->sc_dev = self; 180 sc->sc_iot = iot; 181 sc->sc_addr = addr; 182 sc->sc_dmat = dmat; 183 184 aprint_naive("\n"); 185 aprint_normal(": Gigabit Ethernet Controller\n"); 186 if (bus_space_map(sc->sc_iot, sc->sc_addr, size, 0, 187 &sc->sc_ioh)) { 188 aprint_error_dev(self, "cannot map registers\n"); 189 return; 190 } 191 192 /* allocate dma buffer */ 193 if (enet_alloc_ring(sc)) 194 return; 195 196 #define 
IS_ENADDR_ZERO(enaddr) \ 197 ((enaddr[0] | enaddr[1] | enaddr[2] | \ 198 enaddr[3] | enaddr[4] | enaddr[5]) == 0) 199 200 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 201 /* by any chance, mac-address is already set by bootloader? */ 202 enet_gethwaddr(sc, sc->sc_enaddr); 203 if (IS_ENADDR_ZERO(sc->sc_enaddr)) { 204 /* give up. set randomly */ 205 uint32_t eaddr = random(); 206 /* not multicast */ 207 sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc; 208 sc->sc_enaddr[1] = eaddr >> 16; 209 sc->sc_enaddr[2] = eaddr >> 8; 210 sc->sc_enaddr[3] = eaddr; 211 eaddr = random(); 212 sc->sc_enaddr[4] = eaddr >> 8; 213 sc->sc_enaddr[5] = eaddr; 214 215 aprint_error_dev(self, 216 "cannot get mac address. set randomly\n"); 217 } 218 } 219 enet_sethwaddr(sc, sc->sc_enaddr); 220 221 aprint_normal_dev(self, "Ethernet address %s\n", 222 ether_sprintf(sc->sc_enaddr)); 223 224 enet_init_regs(sc, 1); 225 226 /* setup interrupt handlers */ 227 if ((sc->sc_ih = intr_establish(irq, IPL_NET, 228 IST_LEVEL, enet_intr, sc)) == NULL) { 229 aprint_error_dev(self, "unable to establish interrupt\n"); 230 goto failure; 231 } 232 233 if (sc->sc_imxtype == 7) { 234 /* i.MX7 use 3 interrupts */ 235 if ((sc->sc_ih2 = intr_establish(irq + 1, IPL_NET, 236 IST_LEVEL, enet_intr, sc)) == NULL) { 237 aprint_error_dev(self, "unable to establish 2nd interrupt\n"); 238 intr_disestablish(sc->sc_ih); 239 goto failure; 240 } 241 if ((sc->sc_ih3 = intr_establish(irq + 2, IPL_NET, 242 IST_LEVEL, enet_intr, sc)) == NULL) { 243 aprint_error_dev(self, "unable to establish 3rd interrupt\n"); 244 intr_disestablish(sc->sc_ih2); 245 intr_disestablish(sc->sc_ih); 246 goto failure; 247 } 248 } 249 250 /* callout will be scheduled from enet_init() */ 251 callout_init(&sc->sc_tick_ch, 0); 252 callout_setfunc(&sc->sc_tick_ch, enet_tick, sc); 253 254 /* setup ifp */ 255 ifp = &sc->sc_ethercom.ec_if; 256 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 257 ifp->if_softc = sc; 258 ifp->if_mtu = ETHERMTU; 259 ifp->if_baudrate = IF_Gbps(1); 260 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 261 ifp->if_ioctl = enet_ioctl; 262 ifp->if_start = enet_start; 263 ifp->if_init = enet_init; 264 ifp->if_stop = enet_stop; 265 ifp->if_watchdog = enet_watchdog; 266 267 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 268 #ifdef ENET_SUPPORT_JUMBO 269 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 270 #endif 271 272 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 273 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | 274 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 275 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx | 276 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 277 278 IFQ_SET_MAXLEN(&ifp->if_snd, max(ENET_TX_RING_CNT, IFQ_MAXLEN)); 279 IFQ_SET_READY(&ifp->if_snd); 280 281 /* setup MII */ 282 sc->sc_ethercom.ec_mii = &sc->sc_mii; 283 sc->sc_mii.mii_ifp = ifp; 284 sc->sc_mii.mii_readreg = enet_miibus_readreg; 285 sc->sc_mii.mii_writereg = enet_miibus_writereg; 286 sc->sc_mii.mii_statchg = enet_miibus_statchg; 287 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 288 enet_mediastatus); 289 290 /* try to attach PHY */ 291 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 292 MII_OFFSET_ANY, 0); 293 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 294 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 295 0, NULL); 296 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 297 } else { 298 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 299 } 300 301 if_attach(ifp); 302 ether_ifattach(ifp, sc->sc_enaddr); 303 
ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 304 305 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 306 RND_TYPE_NET, RND_FLAG_DEFAULT); 307 308 #ifdef ENET_EVENT_COUNTER 309 enet_attach_evcnt(sc); 310 #endif 311 312 sc->sc_stopping = false; 313 314 return; 315 316 failure: 317 bus_space_unmap(sc->sc_iot, sc->sc_ioh, size); 318 return; 319 } 320 321 #ifdef ENET_EVENT_COUNTER 322 static void 323 enet_attach_evcnt(struct enet_softc *sc) 324 { 325 const char *xname; 326 327 xname = device_xname(sc->sc_dev); 328 329 #define ENET_EVCNT_ATTACH(name) \ 330 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 331 NULL, xname, #name); 332 333 ENET_EVCNT_ATTACH(t_drop); 334 ENET_EVCNT_ATTACH(t_packets); 335 ENET_EVCNT_ATTACH(t_bc_pkt); 336 ENET_EVCNT_ATTACH(t_mc_pkt); 337 ENET_EVCNT_ATTACH(t_crc_align); 338 ENET_EVCNT_ATTACH(t_undersize); 339 ENET_EVCNT_ATTACH(t_oversize); 340 ENET_EVCNT_ATTACH(t_frag); 341 ENET_EVCNT_ATTACH(t_jab); 342 ENET_EVCNT_ATTACH(t_col); 343 ENET_EVCNT_ATTACH(t_p64); 344 ENET_EVCNT_ATTACH(t_p65to127n); 345 ENET_EVCNT_ATTACH(t_p128to255n); 346 ENET_EVCNT_ATTACH(t_p256to511); 347 ENET_EVCNT_ATTACH(t_p512to1023); 348 ENET_EVCNT_ATTACH(t_p1024to2047); 349 ENET_EVCNT_ATTACH(t_p_gte2048); 350 ENET_EVCNT_ATTACH(t_octets); 351 ENET_EVCNT_ATTACH(r_packets); 352 ENET_EVCNT_ATTACH(r_bc_pkt); 353 ENET_EVCNT_ATTACH(r_mc_pkt); 354 ENET_EVCNT_ATTACH(r_crc_align); 355 ENET_EVCNT_ATTACH(r_undersize); 356 ENET_EVCNT_ATTACH(r_oversize); 357 ENET_EVCNT_ATTACH(r_frag); 358 ENET_EVCNT_ATTACH(r_jab); 359 ENET_EVCNT_ATTACH(r_p64); 360 ENET_EVCNT_ATTACH(r_p65to127); 361 ENET_EVCNT_ATTACH(r_p128to255); 362 ENET_EVCNT_ATTACH(r_p256to511); 363 ENET_EVCNT_ATTACH(r_p512to1023); 364 ENET_EVCNT_ATTACH(r_p1024to2047); 365 ENET_EVCNT_ATTACH(r_p_gte2048); 366 ENET_EVCNT_ATTACH(r_octets); 367 } 368 369 static void 370 enet_update_evcnt(struct enet_softc *sc) 371 { 372 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 373 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 374 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 375 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 376 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 377 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 378 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 379 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 380 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 381 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 382 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 383 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 384 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 385 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 386 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 387 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 388 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 389 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 390 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 391 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 392 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 393 sc->sc_ev_r_crc_align.ev_count += 
ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 394 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 395 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 396 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 397 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 398 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 399 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 400 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 401 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 402 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 403 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 404 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 405 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 406 } 407 #endif /* ENET_EVENT_COUNTER */ 408 409 static void 410 enet_tick(void *arg) 411 { 412 struct enet_softc *sc; 413 struct mii_data *mii; 414 struct ifnet *ifp; 415 int s; 416 417 sc = arg; 418 mii = &sc->sc_mii; 419 ifp = &sc->sc_ethercom.ec_if; 420 421 s = splnet(); 422 423 if (sc->sc_stopping) 424 goto out; 425 426 #ifdef ENET_EVENT_COUNTER 427 enet_update_evcnt(sc); 428 #endif 429 430 /* update counters */ 431 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 432 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 433 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 434 435 /* clear counters */ 436 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 437 ENET_REG_WRITE(sc, ENET_MIBC, 0); 438 439 mii_tick(mii); 440 out: 441 442 if (!sc->sc_stopping) 443 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 444 445 splx(s); 446 } 447 448 static int 449 enet_intr(void *arg) 450 { 451 struct enet_softc *sc; 452 struct ifnet *ifp; 453 uint32_t status; 454 455 sc = arg; 456 status = ENET_REG_READ(sc, ENET_EIR); 457 458 if (sc->sc_imxtype == 7) { 459 if (status & (ENET_EIR_TXF|ENET_EIR_TXF1|ENET_EIR_TXF2)) 460 enet_tx_intr(arg); 461 if (status & (ENET_EIR_RXF|ENET_EIR_RXF1|ENET_EIR_RXF2)) 462 enet_rx_intr(arg); 463 } else { 464 if (status & ENET_EIR_TXF) 465 enet_tx_intr(arg); 466 if (status & ENET_EIR_RXF) 467 enet_rx_intr(arg); 468 } 469 470 if (status & ENET_EIR_EBERR) { 471 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 472 ifp = &sc->sc_ethercom.ec_if; 473 enet_stop(ifp, 1); 474 enet_init(ifp); 475 } else { 476 ENET_REG_WRITE(sc, ENET_EIR, status); 477 } 478 479 rnd_add_uint32(&sc->sc_rnd_source, status); 480 481 return 1; 482 } 483 484 static int 485 enet_tx_intr(void *arg) 486 { 487 struct enet_softc *sc; 488 struct ifnet *ifp; 489 struct enet_txsoft *txs; 490 int idx; 491 492 sc = (struct enet_softc *)arg; 493 ifp = &sc->sc_ethercom.ec_if; 494 495 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 496 idx = ENET_TX_NEXTIDX(idx)) { 497 498 txs = &sc->sc_txsoft[idx]; 499 500 TXDESC_READIN(idx); 501 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 502 /* This TX Descriptor has not been transmitted yet */ 503 break; 504 } 505 506 /* txsoft is available on first segment (TXFLAGS1_T1) */ 507 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 508 bus_dmamap_unload(sc->sc_dmat, 509 txs->txs_dmamap); 510 m_freem(txs->txs_mbuf); 511 ifp->if_opackets++; 512 } 513 514 /* checking error */ 515 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 516 uint32_t flags2; 517 518 flags2 = 
			    sc->sc_txdesc_ring[idx].tx_flags2;

			if (flags2 & (TXFLAGS2_TXE |
			    TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE |
			    TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) {
#ifdef DEBUG_ENET
				if (enet_debug) {
					char flagsbuf[128];

					snprintb(flagsbuf, sizeof(flagsbuf),
					    "\20" "\20TRANSMIT" "\16UNDERFLOW"
					    "\15COLLISION" "\14FRAME"
					    "\13LATECOLLISION" "\12OVERFLOW",
					    flags2);

					device_printf(sc->sc_dev,
					    "txdesc[%d]: transmit error: "
					    "flags2=%s\n", idx, flagsbuf);
				}
#endif /* DEBUG_ENET */
				ifp->if_oerrors++;
			}
		}

		sc->sc_tx_free++;
	}
	sc->sc_tx_considx = idx;

	if (sc->sc_tx_free > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * No more pending TX descriptor,
	 * cancel the watchdog timer.
	 */
	if (sc->sc_tx_free == ENET_TX_RING_CNT)
		ifp->if_timer = 0;

	return 1;
}

static int
enet_rx_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	struct enet_rxsoft *rxs;
	int idx, len, amount;
	uint32_t flags1, flags2;
	struct mbuf *m, *m0, *mprev;

	sc = arg;
	ifp = &sc->sc_ethercom.ec_if;

	m0 = mprev = NULL;
	amount = 0;
	for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) {

		rxs = &sc->sc_rxsoft[idx];

		RXDESC_READIN(idx);
		if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) {
			/* This RX Descriptor has not been received yet */
			break;
		}

		/*
		 * build mbuf from RX Descriptor if needed
		 */
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len;
		len = RXFLAGS1_LEN(flags1);

#define RACC_SHIFT16	2
		if (m0 == NULL) {
			m0 = m;
			m_adj(m0, RACC_SHIFT16);
			len -= RACC_SHIFT16;
			m->m_len = len;
			amount = len;
		} else {
			if (flags1 & RXFLAGS1_L)
				len = len - amount - RACC_SHIFT16;

			m->m_len = len;
			amount += len;
			m->m_flags &= ~M_PKTHDR;
			mprev->m_next = m;
		}
		mprev = m;

		flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

		if (flags1 & RXFLAGS1_L) {
			/* last buffer */
			if ((amount < ETHER_HDR_LEN) ||
			    ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO |
			    RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) ||
			    (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE |
			    RXFLAGS2_CE)))) {

#ifdef DEBUG_ENET
				if (enet_debug) {
					char flags1buf[128], flags2buf[128];
					snprintb(flags1buf, sizeof(flags1buf),
					    "\20" "\31MISS" "\26LENGTHVIOLATION"
					    "\25NONOCTET" "\23CRC" "\22OVERRUN"
					    "\21TRUNCATED", flags1);
					snprintb(flags2buf, sizeof(flags2buf),
					    "\20" "\40MAC" "\33PHY"
					    "\32COLLISION", flags2);

					DEVICE_DPRINTF(
					    "rxdesc[%d]: receive error: "
					    "flags1=%s,flags2=%s,len=%d\n",
					    idx, flags1buf, flags2buf, amount);
				}
#endif /* DEBUG_ENET */
				ifp->if_ierrors++;
				m_freem(m0);

			} else {
				/* packet receive ok */
				ifp->if_ipackets++;
				m_set_rcvif(m0, ifp);
				m0->m_pkthdr.len = amount;

				bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);

				if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 |
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6))
					enet_rx_csum(sc, ifp, m0, idx);

				/* Pass this up to any BPF listeners */
				bpf_mtap(ifp, m0);

				if_percpuq_enqueue(ifp->if_percpuq, m0);
			}

			m0 = NULL;
			mprev = NULL;
			amount = 0;

		} else {
			/* continued from previous buffer */
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		}

		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
		if (enet_alloc_rxbuf(sc, idx) != 0) {
			panic("enet_alloc_rxbuf NULL\n");
		}
	}
	sc->sc_rx_readidx = idx;

	/* re-enable RX DMA to make sure */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	return 1;
}
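
/*
 * Note on the RX checksum descriptor bits consumed below: as used by
 * enet_rx_csum(), RXFLAGS2_ICE appears to signal an IP header checksum
 * error, RXFLAGS2_PCR a TCP/UDP checksum error (only meaningful when
 * ICE is clear and the packet is not an IPv4 fragment), and
 * RXFLAGS2_FRAG marks an IPv4 fragment.
 */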

static void
enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx)
{
	uint32_t flags2;
	uint8_t proto;

	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

	if (flags2 & RXFLAGS2_IPV6) {
		proto = sc->sc_rxdesc_ring[idx].rx_proto;

		/* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */
		if ((proto == IPPROTO_TCP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		else if ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is an IPv4? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ifnet *ifp;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	ifp = &sc->sc_ethercom.ec_if;

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			/* the upper 6 bits of the CRC select the hash bit */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0]
= paddr >> 24; 810 hwaddr[1] = paddr >> 16; 811 hwaddr[2] = paddr >> 8; 812 hwaddr[3] = paddr; 813 814 paddr = ENET_REG_READ(sc, ENET_PAUR); 815 hwaddr[4] = paddr >> 24; 816 hwaddr[5] = paddr >> 16; 817 } 818 819 static void 820 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 821 { 822 uint32_t paddr; 823 824 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 825 hwaddr[3]; 826 ENET_REG_WRITE(sc, ENET_PALR, paddr); 827 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 828 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 829 } 830 831 /* 832 * ifnet interfaces 833 */ 834 static int 835 enet_init(struct ifnet *ifp) 836 { 837 struct enet_softc *sc; 838 int s, error; 839 840 sc = ifp->if_softc; 841 842 s = splnet(); 843 844 enet_init_regs(sc, 0); 845 enet_init_txring(sc); 846 error = enet_init_rxring(sc); 847 if (error != 0) { 848 enet_drain_rxbuf(sc); 849 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 850 goto init_failure; 851 } 852 853 /* reload mac address */ 854 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 855 enet_sethwaddr(sc, sc->sc_enaddr); 856 857 /* program multicast address */ 858 enet_setmulti(sc); 859 860 /* update if_flags */ 861 ifp->if_flags |= IFF_RUNNING; 862 ifp->if_flags &= ~IFF_OACTIVE; 863 864 /* update local copy of if_flags */ 865 sc->sc_if_flags = ifp->if_flags; 866 867 /* mii */ 868 mii_mediachg(&sc->sc_mii); 869 870 /* enable RX DMA */ 871 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 872 873 sc->sc_stopping = false; 874 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 875 876 init_failure: 877 splx(s); 878 879 return error; 880 } 881 882 static void 883 enet_start(struct ifnet *ifp) 884 { 885 struct enet_softc *sc; 886 struct mbuf *m; 887 int npkt; 888 889 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 890 return; 891 892 sc = ifp->if_softc; 893 for (npkt = 0; ; npkt++) { 894 IFQ_POLL(&ifp->if_snd, m); 895 if (m == NULL) 896 break; 897 898 if (sc->sc_tx_free <= 0) { 899 /* no tx descriptor now... */ 900 ifp->if_flags |= IFF_OACTIVE; 901 DEVICE_DPRINTF("TX descriptor is full\n"); 902 break; 903 } 904 905 IFQ_DEQUEUE(&ifp->if_snd, m); 906 907 if (enet_encap_txring(sc, &m) != 0) { 908 /* too many mbuf chains? */ 909 ifp->if_flags |= IFF_OACTIVE; 910 DEVICE_DPRINTF( 911 "TX descriptor is full. dropping packet\n"); 912 m_freem(m); 913 ifp->if_oerrors++; 914 break; 915 } 916 917 /* Pass the packet to any BPF listeners */ 918 bpf_mtap(ifp, m); 919 } 920 921 if (npkt) { 922 /* enable TX DMA */ 923 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 924 925 ifp->if_timer = 5; 926 } 927 } 928 929 static void 930 enet_stop(struct ifnet *ifp, int disable) 931 { 932 struct enet_softc *sc; 933 int s; 934 uint32_t v; 935 936 sc = ifp->if_softc; 937 938 s = splnet(); 939 940 sc->sc_stopping = true; 941 callout_stop(&sc->sc_tick_ch); 942 943 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 944 v = ENET_REG_READ(sc, ENET_ECR); 945 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 946 947 /* Mark the interface as down and cancel the watchdog timer. 
*/ 948 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 949 ifp->if_timer = 0; 950 951 if (disable) { 952 enet_drain_txbuf(sc); 953 enet_drain_rxbuf(sc); 954 } 955 956 splx(s); 957 } 958 959 static void 960 enet_watchdog(struct ifnet *ifp) 961 { 962 struct enet_softc *sc; 963 int s; 964 965 sc = ifp->if_softc; 966 s = splnet(); 967 968 device_printf(sc->sc_dev, "watchdog timeout\n"); 969 ifp->if_oerrors++; 970 971 /* salvage packets left in descriptors */ 972 enet_tx_intr(sc); 973 enet_rx_intr(sc); 974 975 /* reset */ 976 enet_stop(ifp, 1); 977 enet_init(ifp); 978 979 splx(s); 980 } 981 982 static void 983 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 984 { 985 struct enet_softc *sc = ifp->if_softc; 986 987 ether_mediastatus(ifp, ifmr); 988 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 989 | sc->sc_flowflags; 990 } 991 992 static int 993 enet_ifflags_cb(struct ethercom *ec) 994 { 995 struct ifnet *ifp = &ec->ec_if; 996 struct enet_softc *sc = ifp->if_softc; 997 int change = ifp->if_flags ^ sc->sc_if_flags; 998 999 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1000 return ENETRESET; 1001 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 1002 return 0; 1003 1004 enet_setmulti(sc); 1005 1006 sc->sc_if_flags = ifp->if_flags; 1007 return 0; 1008 } 1009 1010 static int 1011 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 1012 { 1013 struct enet_softc *sc; 1014 struct ifreq *ifr; 1015 int s, error; 1016 uint32_t v; 1017 1018 sc = ifp->if_softc; 1019 ifr = data; 1020 1021 error = 0; 1022 1023 s = splnet(); 1024 1025 switch (command) { 1026 case SIOCSIFMTU: 1027 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 1028 error = EINVAL; 1029 } else { 1030 ifp->if_mtu = ifr->ifr_mtu; 1031 1032 /* set maximum frame length */ 1033 v = MTU2FRAMESIZE(ifr->ifr_mtu); 1034 ENET_REG_WRITE(sc, ENET_FTRL, v); 1035 v = ENET_REG_READ(sc, ENET_RCR); 1036 v &= ~ENET_RCR_MAX_FL(0x3fff); 1037 v |= ENET_RCR_MAX_FL(ifp->if_mtu + 1038 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1039 ENET_REG_WRITE(sc, ENET_RCR, v); 1040 } 1041 break; 1042 case SIOCSIFMEDIA: 1043 case SIOCGIFMEDIA: 1044 /* Flow control requires full-duplex mode. */ 1045 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1046 (ifr->ifr_media & IFM_FDX) == 0) 1047 ifr->ifr_media &= ~IFM_ETH_FMASK; 1048 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1049 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1050 /* We can do both TXPAUSE and RXPAUSE. 
				 */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error != ENETRESET)
			break;

		/* post-process */
		error = 0;
		switch (command) {
		case SIOCSIFCAP:
			error = (*ifp->if_init)(ifp);
			break;
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (ifp->if_flags & IFF_RUNNING)
				enet_setmulti(sc);
			break;
		}
		break;
	}

	splx(s);

	return error;
}

/*
 * for MII
 */
static int
enet_miibus_readreg(device_t dev, int phy, int reg)
{
	struct enet_softc *sc;
	int timeout;
	uint32_t val, status;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* read command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		status = ENET_REG_READ(sc, ENET_EIR);
		if (status & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n",
		    reg);
		val = -1;
	} else {
		val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK;
	}

	return val;
}

static void
enet_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct enet_softc *sc;
	int timeout;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* write command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) |
	    (ENET_MMFR_DATAMASK & val));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n",
		    reg);
	}
}

static void
enet_miibus_statchg(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	uint32_t ecr, ecr0;
	uint32_t rcr, rcr0;
	uint32_t tcr, tcr0;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/* get current status */
	ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET;
	rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR);
	tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR);

	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
		tcr |= ENET_TCR_FDEN;	/* full duplex */
		rcr &= ~ENET_RCR_DRT;	/* enable receive on transmit */
	} else {
		tcr &= ~ENET_TCR_FDEN;	/* half duplex */
		rcr |= ENET_RCR_DRT;	/* disable receive on transmit */
	}

	if ((tcr ^ tcr0) & ENET_TCR_FDEN) {
		/*
		 * need to reset because
		 * FDEN can be changed only while ECR[ETHEREN] is 0
		 */
		enet_init_regs(sc, 0);
		return;
	}
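
	/*
	 * Speed selection: as programmed below, ENET_ECR_SPEED selects the
	 * gigabit (RGMII 1000Mbps) mode, while ENET_RCR_RMII_10T selects
	 * 10Mbps operation when the MAC runs in 10/100 mode; 100Mbps uses
	 * neither bit.
	 */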

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
	case IFM_1000_T:
		ecr |= ENET_ECR_SPEED;		/* 1000Mbps mode */
		break;
	case IFM_100_TX:
		ecr &= ~ENET_ECR_SPEED;		/* 100Mbps mode */
		rcr &= ~ENET_RCR_RMII_10T;	/* 100Mbps mode */
		break;
	case IFM_10_T:
		ecr &= ~ENET_ECR_SPEED;		/* 10Mbps mode */
		rcr |= ENET_RCR_RMII_10T;	/* 10Mbps mode */
		break;
	default:
		ecr = ecr0;
		rcr = rcr0;
		tcr = tcr0;
		break;
	}

	if (sc->sc_flowflags & IFM_FLOW)
		rcr |= ENET_RCR_FCE;
	else
		rcr &= ~ENET_RCR_FCE;

	/* update registers if need change */
	if (ecr != ecr0)
		ENET_REG_WRITE(sc, ENET_ECR, ecr);
	if (rcr != rcr0)
		ENET_REG_WRITE(sc, ENET_RCR, rcr);
	if (tcr != tcr0)
		ENET_REG_WRITE(sc, ENET_TCR, tcr);
}

/*
 * handling descriptors
 */
static void
enet_init_txring(struct enet_softc *sc)
{
	int i;

	/* build TX ring */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		sc->sc_txdesc_ring[i].tx_flags1_len =
		    ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0);
		sc->sc_txdesc_ring[i].tx_databuf = 0;
		sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT;
		sc->sc_txdesc_ring[i].tx__reserved1 = 0;
		sc->sc_txdesc_ring[i].tx_flags3 = 0;
		sc->sc_txdesc_ring[i].tx_1588timestamp = 0;
		sc->sc_txdesc_ring[i].tx__reserved2 = 0;
		sc->sc_txdesc_ring[i].tx__reserved3 = 0;

		TXDESC_WRITEOUT(i);
	}

	sc->sc_tx_free = ENET_TX_RING_CNT;
	sc->sc_tx_considx = 0;
	sc->sc_tx_prodidx = 0;
}

static int
enet_init_rxring(struct enet_softc *sc)
{
	int i, error;

	/* build RX ring */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = enet_alloc_rxbuf(sc, i);
		if (error != 0)
			return error;
	}

	sc->sc_rx_readidx = 0;

	return 0;
}

static int
enet_alloc_rxbuf(struct enet_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT));

	/* free mbuf if already allocated */
	if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap);
		m_freem(sc->sc_rxsoft[idx].rxs_mbuf);
		sc->sc_rxsoft[idx].rxs_mbuf = NULL;
	}

	/* allocate new mbuf cluster */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}
	m->m_len = MCLBYTES;
	m->m_next = NULL;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0,
	    sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_rxsoft[idx].rxs_mbuf = m;
	enet_reset_rxdesc(sc, idx);
	return 0;
}

static void
enet_reset_rxdesc(struct enet_softc *sc, int idx)
{
	uint32_t paddr;

	paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr;

	sc->sc_rxdesc_ring[idx].rx_flags1_len =
	    RXFLAGS1_E |
	    ((idx == (ENET_RX_RING_CNT - 1)) ?
RXFLAGS1_W : 0); 1323 sc->sc_rxdesc_ring[idx].rx_databuf = paddr; 1324 sc->sc_rxdesc_ring[idx].rx_flags2 = 1325 RXFLAGS2_INT; 1326 sc->sc_rxdesc_ring[idx].rx_hl = 0; 1327 sc->sc_rxdesc_ring[idx].rx_proto = 0; 1328 sc->sc_rxdesc_ring[idx].rx_cksum = 0; 1329 sc->sc_rxdesc_ring[idx].rx_flags3 = 0; 1330 sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0; 1331 sc->sc_rxdesc_ring[idx].rx__reserved2 = 0; 1332 sc->sc_rxdesc_ring[idx].rx__reserved3 = 0; 1333 1334 RXDESC_WRITEOUT(idx); 1335 } 1336 1337 static void 1338 enet_drain_txbuf(struct enet_softc *sc) 1339 { 1340 int idx; 1341 struct enet_txsoft *txs; 1342 struct ifnet *ifp; 1343 1344 ifp = &sc->sc_ethercom.ec_if; 1345 1346 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 1347 idx = ENET_TX_NEXTIDX(idx)) { 1348 1349 /* txsoft[] is used only first segment */ 1350 txs = &sc->sc_txsoft[idx]; 1351 TXDESC_READIN(idx); 1352 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 1353 sc->sc_txdesc_ring[idx].tx_flags1_len = 0; 1354 bus_dmamap_unload(sc->sc_dmat, 1355 txs->txs_dmamap); 1356 m_freem(txs->txs_mbuf); 1357 1358 ifp->if_oerrors++; 1359 } 1360 sc->sc_tx_free++; 1361 } 1362 } 1363 1364 static void 1365 enet_drain_rxbuf(struct enet_softc *sc) 1366 { 1367 int i; 1368 1369 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1370 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) { 1371 sc->sc_rxdesc_ring[i].rx_flags1_len = 0; 1372 bus_dmamap_unload(sc->sc_dmat, 1373 sc->sc_rxsoft[i].rxs_dmamap); 1374 m_freem(sc->sc_rxsoft[i].rxs_mbuf); 1375 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1376 } 1377 } 1378 } 1379 1380 static int 1381 enet_alloc_ring(struct enet_softc *sc) 1382 { 1383 int i, error; 1384 1385 /* 1386 * build DMA maps for TX. 1387 * TX descriptor must be able to contain mbuf chains, 1388 * so, make up ENET_MAX_PKT_NSEGS dmamap. 1389 */ 1390 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1391 error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN, 1392 ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT, 1393 &sc->sc_txsoft[i].txs_dmamap); 1394 1395 if (error) { 1396 aprint_error_dev(sc->sc_dev, 1397 "can't create DMA map for TX descs\n"); 1398 goto fail_1; 1399 } 1400 } 1401 1402 /* 1403 * build DMA maps for RX. 1404 * RX descripter contains An mbuf cluster, 1405 * and make up a dmamap. 
1406 */ 1407 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1408 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1409 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1410 &sc->sc_rxsoft[i].rxs_dmamap); 1411 if (error) { 1412 aprint_error_dev(sc->sc_dev, 1413 "can't create DMA map for RX descs\n"); 1414 goto fail_2; 1415 } 1416 } 1417 1418 if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT, 1419 (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0) 1420 return -1; 1421 memset(sc->sc_txdesc_ring, 0, 1422 sizeof(struct enet_txdesc) * ENET_TX_RING_CNT); 1423 1424 if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT, 1425 (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0) 1426 return -1; 1427 memset(sc->sc_rxdesc_ring, 0, 1428 sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT); 1429 1430 return 0; 1431 1432 fail_2: 1433 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1434 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1435 bus_dmamap_destroy(sc->sc_dmat, 1436 sc->sc_rxsoft[i].rxs_dmamap); 1437 } 1438 fail_1: 1439 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1440 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1441 bus_dmamap_destroy(sc->sc_dmat, 1442 sc->sc_txsoft[i].txs_dmamap); 1443 } 1444 return error; 1445 } 1446 1447 static int 1448 enet_encap_mbufalign(struct mbuf **mp) 1449 { 1450 struct mbuf *m, *m0, *mt, *p, *x; 1451 void *ap; 1452 uint32_t alignoff, chiplen; 1453 1454 /* 1455 * iMX6 SoC ethernet controller requires 1456 * address of buffer must aligned 8, and 1457 * length of buffer must be greater than 10 (first fragment only?) 1458 */ 1459 #define ALIGNBYTE 8 1460 #define MINBUFSIZE 10 1461 #define ALIGN_PTR(p, align) \ 1462 (void *)(((uintptr_t)(p) + ((align) - 1)) & -(align)) 1463 1464 m0 = *mp; 1465 mt = p = NULL; 1466 for (m = m0; m != NULL; m = m->m_next) { 1467 alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1); 1468 if (m->m_len < (ALIGNBYTE * 2)) { 1469 /* 1470 * rearrange mbuf data aligned 1471 * 1472 * align 8 * * * * * 1473 * +0123456789abcdef0123456789abcdef0 1474 * FROM m->m_data[___________abcdefghijklmn_______] 1475 * 1476 * +0123456789abcdef0123456789abcdef0 1477 * TO m->m_data[________abcdefghijklm___________] or 1478 * m->m_data[________________abcdefghijklmn__] 1479 */ 1480 if ((alignoff != 0) && (m->m_len != 0)) { 1481 chiplen = ALIGNBYTE - alignoff; 1482 if (M_LEADINGSPACE(m) >= alignoff) { 1483 ap = m->m_data - alignoff; 1484 memmove(ap, m->m_data, m->m_len); 1485 m->m_data = ap; 1486 } else if (M_TRAILINGSPACE(m) >= chiplen) { 1487 ap = m->m_data + chiplen; 1488 memmove(ap, m->m_data, m->m_len); 1489 m->m_data = ap; 1490 } else { 1491 /* 1492 * no space to align data. (M_READONLY?) 1493 * allocate new mbuf aligned, 1494 * and copy to it. 
1495 */ 1496 MGET(x, M_DONTWAIT, m->m_type); 1497 if (x == NULL) { 1498 m_freem(m); 1499 return ENOBUFS; 1500 } 1501 MCLAIM(x, m->m_owner); 1502 if (m->m_flags & M_PKTHDR) 1503 M_MOVE_PKTHDR(x, m); 1504 x->m_len = m->m_len; 1505 x->m_data = ALIGN_PTR(x->m_data, 1506 ALIGNBYTE); 1507 memcpy(mtod(x, void *), mtod(m, void *), 1508 m->m_len); 1509 p->m_next = x; 1510 MFREE(m, x->m_next); 1511 m = x; 1512 } 1513 } 1514 1515 /* 1516 * fill 1st mbuf at least 10byte 1517 * 1518 * align 8 * * * * * 1519 * +0123456789abcdef0123456789abcdef0 1520 * FROM m->m_data[________abcde___________________] 1521 * m->m_data[__fg____________________________] 1522 * m->m_data[_________________hi_____________] 1523 * m->m_data[__________jk____________________] 1524 * m->m_data[____l___________________________] 1525 * 1526 * +0123456789abcdef0123456789abcdef0 1527 * TO m->m_data[________abcdefghij______________] 1528 * m->m_data[________________________________] 1529 * m->m_data[________________________________] 1530 * m->m_data[___________k____________________] 1531 * m->m_data[____l___________________________] 1532 */ 1533 if (mt == NULL) { 1534 mt = m; 1535 while (mt->m_len == 0) { 1536 mt = mt->m_next; 1537 if (mt == NULL) { 1538 m_freem(m); 1539 return ENOBUFS; 1540 } 1541 } 1542 1543 /* mt = 1st mbuf, x = 2nd mbuf */ 1544 x = mt->m_next; 1545 while (mt->m_len < MINBUFSIZE) { 1546 if (x == NULL) { 1547 m_freem(m); 1548 return ENOBUFS; 1549 } 1550 1551 alignoff = (uintptr_t)x->m_data & 1552 (ALIGNBYTE - 1); 1553 chiplen = ALIGNBYTE - alignoff; 1554 if (chiplen > x->m_len) { 1555 chiplen = x->m_len; 1556 } else if ((mt->m_len + chiplen) < 1557 MINBUFSIZE) { 1558 /* 1559 * next mbuf should be greater 1560 * than ALIGNBYTE? 1561 */ 1562 if (x->m_len >= (chiplen + 1563 ALIGNBYTE * 2)) 1564 chiplen += ALIGNBYTE; 1565 else 1566 chiplen = x->m_len; 1567 } 1568 1569 if (chiplen && 1570 (M_TRAILINGSPACE(mt) < chiplen)) { 1571 /* 1572 * move data to the begining of 1573 * m_dat[] (aligned) to en- 1574 * large trailingspace 1575 */ 1576 if (mt->m_flags & M_EXT) { 1577 ap = mt->m_ext.ext_buf; 1578 } else if (mt->m_flags & 1579 M_PKTHDR) { 1580 ap = mt->m_pktdat; 1581 } else { 1582 ap = mt->m_dat; 1583 } 1584 ap = ALIGN_PTR(ap, ALIGNBYTE); 1585 memcpy(ap, mt->m_data, mt->m_len); 1586 mt->m_data = ap; 1587 } 1588 1589 if (chiplen && 1590 (M_TRAILINGSPACE(mt) >= chiplen)) { 1591 memcpy(mt->m_data + mt->m_len, 1592 x->m_data, chiplen); 1593 mt->m_len += chiplen; 1594 m_adj(x, chiplen); 1595 } 1596 1597 x = x->m_next; 1598 } 1599 } 1600 1601 } else { 1602 mt = m; 1603 1604 /* 1605 * allocate new mbuf x, and rearrange as below; 1606 * 1607 * align 8 * * * * * 1608 * +0123456789abcdef0123456789abcdef0 1609 * FROM m->m_data[____________abcdefghijklmnopq___] 1610 * 1611 * +0123456789abcdef0123456789abcdef0 1612 * TO x->m_data[________abcdefghijkl____________] 1613 * m->m_data[________________________mnopq___] 1614 * 1615 */ 1616 if (alignoff != 0) { 1617 /* at least ALIGNBYTE */ 1618 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1619 1620 MGET(x, M_DONTWAIT, m->m_type); 1621 if (x == NULL) { 1622 m_freem(m); 1623 return ENOBUFS; 1624 } 1625 MCLAIM(x, m->m_owner); 1626 if (m->m_flags & M_PKTHDR) 1627 M_MOVE_PKTHDR(x, m); 1628 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1629 memcpy(mtod(x, void *), mtod(m, void *), 1630 chiplen); 1631 x->m_len = chiplen; 1632 x->m_next = m; 1633 m_adj(m, chiplen); 1634 1635 if (p == NULL) 1636 m0 = x; 1637 else 1638 p->m_next = x; 1639 } 1640 } 1641 p = m; 1642 } 1643 *mp = m0; 1644 1645 return 0; 
1646 } 1647 1648 static int 1649 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1650 { 1651 bus_dmamap_t map; 1652 struct mbuf *m; 1653 int csumflags, idx, i, error; 1654 uint32_t flags1, flags2; 1655 1656 idx = sc->sc_tx_prodidx; 1657 map = sc->sc_txsoft[idx].txs_dmamap; 1658 1659 /* align mbuf data for claim of ENET */ 1660 error = enet_encap_mbufalign(mp); 1661 if (error != 0) 1662 return error; 1663 1664 m = *mp; 1665 csumflags = m->m_pkthdr.csum_flags; 1666 1667 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1668 BUS_DMA_NOWAIT); 1669 if (error != 0) { 1670 device_printf(sc->sc_dev, 1671 "Error mapping mbuf into TX chain: error=%d\n", error); 1672 m_freem(m); 1673 return error; 1674 } 1675 1676 if (map->dm_nsegs > sc->sc_tx_free) { 1677 bus_dmamap_unload(sc->sc_dmat, map); 1678 device_printf(sc->sc_dev, 1679 "too many mbuf chain %d\n", map->dm_nsegs); 1680 m_freem(m); 1681 return ENOBUFS; 1682 } 1683 1684 /* fill protocol cksum zero beforehand */ 1685 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1686 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1687 struct mbuf *m1; 1688 int ehlen, moff; 1689 uint16_t etype; 1690 1691 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1692 switch (ntohs(etype)) { 1693 case ETHERTYPE_IP: 1694 case ETHERTYPE_IPV6: 1695 ehlen = ETHER_HDR_LEN; 1696 break; 1697 case ETHERTYPE_VLAN: 1698 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1699 break; 1700 default: 1701 ehlen = 0; 1702 break; 1703 } 1704 1705 if (ehlen) { 1706 m1 = m_getptr(m, ehlen + 1707 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1708 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data), 1709 &moff); 1710 if (m1 != NULL) 1711 *(uint16_t *)(mtod(m1, char *) + moff) = 0; 1712 } 1713 } 1714 1715 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1716 BUS_DMASYNC_PREWRITE); 1717 1718 for (i = 0; i < map->dm_nsegs; i++) { 1719 flags1 = TXFLAGS1_R; 1720 flags2 = 0; 1721 1722 if (i == 0) { 1723 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1724 sc->sc_txsoft[idx].txs_mbuf = m; 1725 } 1726 1727 /* checksum offloading */ 1728 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1729 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1730 flags2 |= TXFLAGS2_PINS; 1731 if (csumflags & (M_CSUM_IPv4)) 1732 flags2 |= TXFLAGS2_IINS; 1733 1734 if (i == map->dm_nsegs - 1) { 1735 /* mark last segment */ 1736 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1737 flags2 |= TXFLAGS2_INT; 1738 } 1739 if (idx == ENET_TX_RING_CNT - 1) { 1740 /* mark end of ring */ 1741 flags1 |= TXFLAGS1_W; 1742 } 1743 1744 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1745 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1746 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1747 TXDESC_WRITEOUT(idx); 1748 1749 sc->sc_txdesc_ring[idx].tx_flags1_len = 1750 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1751 TXDESC_WRITEOUT(idx); 1752 1753 idx = ENET_TX_NEXTIDX(idx); 1754 sc->sc_tx_free--; 1755 } 1756 1757 sc->sc_tx_prodidx = idx; 1758 1759 return 0; 1760 } 1761 1762 /* 1763 * device initialize 1764 */ 1765 static int 1766 enet_init_regs(struct enet_softc *sc, int init) 1767 { 1768 struct mii_data *mii; 1769 struct ifmedia_entry *ife; 1770 paddr_t paddr; 1771 uint32_t val; 1772 int fulldup, ecr_speed, rcr_speed, flowctrl; 1773 1774 if (init) { 1775 fulldup = 1; 1776 ecr_speed = ENET_ECR_SPEED; 1777 rcr_speed = 0; 1778 flowctrl = 0; 1779 } else { 1780 mii = &sc->sc_mii; 1781 ife = mii->mii_media.ifm_cur; 1782 1783 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) 1784 fulldup = 1; 1785 else 1786 fulldup = 0; 1787 1788 switch (IFM_SUBTYPE(ife->ifm_media)) { 1789 
		case IFM_10_T:
			ecr_speed = 0;
			rcr_speed = ENET_RCR_RMII_10T;
			break;
		case IFM_100_TX:
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupts */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable MIB register */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/* MII speed setup. MDC clock (=2.5MHz) = ENET_PLL/((val+1)*2) */
	val = ((sc->sc_pllclock) / 500000 - 1) / 10;
	ENET_REG_WRITE(sc, ENET_MSCR, val << 1);

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksum is off by default (overridden per TX descriptor) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align ethernet payload on 32bit, discard frames with MAC layer error,
	 * and don't discard checksum error
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);
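
	/*
	 * For reference: with the default MTU of 1500, MTU2FRAMESIZE(1500) =
	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) +
	 * ETHER_VLAN_ENCAP_LEN (4) = 1522, which matches ENET_DEFAULT_PKT_LEN
	 * programmed into FTRL and RCR[MAX_FL] below.
	 */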

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* RX frame padding remove */
	    ENET_RCR_RGMII_EN |			/* use RGMII */
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
	    rcr_speed |
	    (fulldup ? 0 : ENET_RCR_DRT) |
	    ENET_RCR_MAX_FL(val));

	/* Maximum Receive BufSize per one descriptor */
	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);


	/* TX/RX Descriptor Physical Address */
	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
	/* sync cache */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* enable interrupts */
	val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR;
	if (sc->sc_imxtype == 7)
		val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 |
		    ENET_EIR_TXF1 | ENET_EIR_RXF1;
	ENET_REG_WRITE(sc, ENET_EIMR, val);

	/* enable ether */
	ENET_REG_WRITE(sc, ENET_ECR,
#if _BYTE_ORDER == _LITTLE_ENDIAN
	    ENET_ECR_DBSWP |
#endif
	    ENET_ECR_SPEED |		/* default 1000Mbps mode */
	    ENET_ECR_EN1588 |		/* use enhanced TX/RX descriptor */
	    ENET_ECR_ETHEREN);		/* Ethernet Enable */

	return 0;
}

static int
enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp,
    bus_dmamap_t *mapp)
{
	bus_dma_segment_t seglist[1];
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist,
	    1, &nsegs, BUS_DMA_NOWAIT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate DMA buffer, error=%d\n", error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to map DMA buffer, error=%d\n",
		    error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create DMA map, error=%d\n", error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load DMA map, error=%d\n", error);
		goto fail_load;
	}

	return 0;

fail_load:
	bus_dmamap_destroy(sc->sc_dmat, *mapp);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, *addrp, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, seglist, 1);
fail_alloc:
	return error;
}