/*	$NetBSD: if_enet.c,v 1.17 2019/01/22 03:42:25 msaitoh Exp $	*/

/*
 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.17 2019/01/22 03:42:25 msaitoh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/rndsource.h>

#include <lib/libkern/libkern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/imx/if_enetreg.h>
#include <arm/imx/if_enetvar.h>

#undef DEBUG_ENET
#undef ENET_EVENT_COUNTER

#define ENET_TICK	hz

#ifdef DEBUG_ENET
int enet_debug = 0;
# define DEVICE_DPRINTF(args...)	\
	do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0)
#else
# define DEVICE_DPRINTF(args...)
#endif


#define RXDESC_MAXBUFSIZE	0x07f0
		/* ENET does not work greater than 0x0800... */

#undef ENET_SUPPORT_JUMBO	/* JUMBO FRAME SUPPORT is unstable */
#ifdef ENET_SUPPORT_JUMBO
# define ENET_MAX_PKT_LEN	4034	/* MAX FIFO LEN */
#else
# define ENET_MAX_PKT_LEN	1522
#endif
#define ENET_DEFAULT_PKT_LEN	1522	/* including VLAN tag */
#define MTU2FRAMESIZE(n)	\
	((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN)


#define ENET_MAX_PKT_NSEGS	64

#define ENET_TX_NEXTIDX(idx)	\
	(((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1))
#define ENET_RX_NEXTIDX(idx)	\
	(((idx) >= (ENET_RX_RING_CNT - 1)) ? 0 : ((idx) + 1))
#define TXDESC_WRITEOUT(idx)	\
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap,	\
	    sizeof(struct enet_txdesc) * (idx),			\
	    sizeof(struct enet_txdesc),				\
	    BUS_DMASYNC_PREWRITE)

#define TXDESC_READIN(idx)	\
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap,	\
	    sizeof(struct enet_txdesc) * (idx),			\
	    sizeof(struct enet_txdesc),				\
	    BUS_DMASYNC_PREREAD)

#define RXDESC_WRITEOUT(idx)	\
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap,	\
	    sizeof(struct enet_rxdesc) * (idx),			\
	    sizeof(struct enet_rxdesc),				\
	    BUS_DMASYNC_PREWRITE)

#define RXDESC_READIN(idx)	\
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap,	\
	    sizeof(struct enet_rxdesc) * (idx),			\
	    sizeof(struct enet_rxdesc),				\
	    BUS_DMASYNC_PREREAD)

#define ENET_REG_READ(sc, reg)	\
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)

#define ENET_REG_WRITE(sc, reg, value)	\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value)

#ifdef ENET_EVENT_COUNTER
static void enet_attach_evcnt(struct enet_softc *);
static void enet_update_evcnt(struct enet_softc *);
#endif

static int enet_intr(void *);
static void enet_tick(void *);
static int enet_tx_intr(void *);
static int enet_rx_intr(void *);
static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *,
    int);

static void enet_start(struct ifnet *);
static int enet_ifflags_cb(struct ethercom *);
static int enet_ioctl(struct ifnet *, u_long, void *);
static int enet_init(struct ifnet *);
static void enet_stop(struct ifnet *, int);
static void enet_watchdog(struct ifnet *);
static void enet_mediastatus(struct ifnet *, struct ifmediareq *);

static int enet_miibus_readreg(device_t, int, int, uint16_t *);
static int enet_miibus_writereg(device_t, int, int, uint16_t);
static void enet_miibus_statchg(struct ifnet *);

static void enet_gethwaddr(struct enet_softc *, uint8_t *);
static void enet_sethwaddr(struct enet_softc *, uint8_t *);
static void enet_setmulti(struct enet_softc *);
static int enet_encap_mbufalign(struct mbuf **);
static int enet_encap_txring(struct enet_softc *, struct mbuf **);
static int enet_init_regs(struct enet_softc *, int);
static int enet_alloc_ring(struct enet_softc *);
static void enet_init_txring(struct enet_softc *);
static int enet_init_rxring(struct enet_softc *);
static void enet_reset_rxdesc(struct enet_softc *, int);
static int enet_alloc_rxbuf(struct enet_softc *, int);
static void enet_drain_txbuf(struct enet_softc *);
static void enet_drain_rxbuf(struct enet_softc *);
static int enet_alloc_dma(struct enet_softc *, size_t, void **,
    bus_dmamap_t *);

CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc),
    enet_match, enet_attach, NULL, NULL);

void
enet_attach_common(device_t self, bus_space_tag_t iot,
    bus_dma_tag_t dmat, bus_addr_t addr, bus_size_t size, int irq)
{
	struct enet_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	sc->sc_dev = self;
	sc->sc_iot = iot;
	sc->sc_addr = addr;
	sc->sc_dmat = dmat;

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");
	if (bus_space_map(sc->sc_iot, sc->sc_addr, size, 0,
	    &sc->sc_ioh)) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	/* allocate dma buffer */
	if (enet_alloc_ring(sc))
		return;
#define IS_ENADDR_ZERO(enaddr)				\
	((enaddr[0] | enaddr[1] | enaddr[2] |		\
	  enaddr[3] | enaddr[4] | enaddr[5]) == 0)

	if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
		/* the bootloader may already have set a mac-address */
		enet_gethwaddr(sc, sc->sc_enaddr);
		if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
			/* give up; set a random address */
			uint32_t eaddr = random();
			/* not multicast */
			sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc;
			sc->sc_enaddr[1] = eaddr >> 16;
			sc->sc_enaddr[2] = eaddr >> 8;
			sc->sc_enaddr[3] = eaddr;
			eaddr = random();
			sc->sc_enaddr[4] = eaddr >> 8;
			sc->sc_enaddr[5] = eaddr;

			aprint_error_dev(self,
			    "cannot get mac address. set randomly\n");
		}
	}
	enet_sethwaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	enet_init_regs(sc, 1);

	/* setup interrupt handlers */
	if ((sc->sc_ih = intr_establish(irq, IPL_NET,
	    IST_LEVEL, enet_intr, sc)) == NULL) {
		aprint_error_dev(self, "unable to establish interrupt\n");
		goto failure;
	}

	if (sc->sc_imxtype == 7) {
		/* the i.MX7 uses 3 interrupts */
		if ((sc->sc_ih2 = intr_establish(irq + 1, IPL_NET,
		    IST_LEVEL, enet_intr, sc)) == NULL) {
			aprint_error_dev(self,
			    "unable to establish 2nd interrupt\n");
			intr_disestablish(sc->sc_ih);
			goto failure;
		}
		if ((sc->sc_ih3 = intr_establish(irq + 2, IPL_NET,
		    IST_LEVEL, enet_intr, sc)) == NULL) {
			aprint_error_dev(self,
			    "unable to establish 3rd interrupt\n");
			intr_disestablish(sc->sc_ih2);
			intr_disestablish(sc->sc_ih);
			goto failure;
		}
	}

	/* callout will be scheduled from enet_init() */
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, enet_tick, sc);

	/* setup ifp */
	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = enet_ioctl;
	ifp->if_start = enet_start;
	ifp->if_init = enet_init;
	ifp->if_stop = enet_stop;
	ifp->if_watchdog = enet_watchdog;

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
#ifdef ENET_SUPPORT_JUMBO
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
#endif

	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* setup MII */
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = enet_miibus_readreg;
	sc->sc_mii.mii_writereg = enet_miibus_writereg;
	sc->sc_mii.mii_statchg = enet_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    enet_mediastatus);

	/* try to attach PHY */
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef ENET_EVENT_COUNTER
	enet_attach_evcnt(sc);
#endif

	sc->sc_stopping = false;

	return;

failure:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, size);
	return;
}

#ifdef ENET_EVENT_COUNTER
static void
enet_attach_evcnt(struct enet_softc *sc)
{
	const char *xname;

	xname = device_xname(sc->sc_dev);

#define ENET_EVCNT_ATTACH(name)	\
	evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC,	\
	    NULL, xname, #name);

	ENET_EVCNT_ATTACH(t_drop);
	ENET_EVCNT_ATTACH(t_packets);
	ENET_EVCNT_ATTACH(t_bc_pkt);
	ENET_EVCNT_ATTACH(t_mc_pkt);
	ENET_EVCNT_ATTACH(t_crc_align);
	ENET_EVCNT_ATTACH(t_undersize);
	ENET_EVCNT_ATTACH(t_oversize);
	ENET_EVCNT_ATTACH(t_frag);
	ENET_EVCNT_ATTACH(t_jab);
	ENET_EVCNT_ATTACH(t_col);
	ENET_EVCNT_ATTACH(t_p64);
	ENET_EVCNT_ATTACH(t_p65to127n);
	ENET_EVCNT_ATTACH(t_p128to255n);
	ENET_EVCNT_ATTACH(t_p256to511);
	ENET_EVCNT_ATTACH(t_p512to1023);
	ENET_EVCNT_ATTACH(t_p1024to2047);
	ENET_EVCNT_ATTACH(t_p_gte2048);
	ENET_EVCNT_ATTACH(t_octets);
	ENET_EVCNT_ATTACH(r_packets);
	ENET_EVCNT_ATTACH(r_bc_pkt);
	ENET_EVCNT_ATTACH(r_mc_pkt);
	ENET_EVCNT_ATTACH(r_crc_align);
	ENET_EVCNT_ATTACH(r_undersize);
	ENET_EVCNT_ATTACH(r_oversize);
	ENET_EVCNT_ATTACH(r_frag);
	ENET_EVCNT_ATTACH(r_jab);
	ENET_EVCNT_ATTACH(r_p64);
	ENET_EVCNT_ATTACH(r_p65to127);
	ENET_EVCNT_ATTACH(r_p128to255);
	ENET_EVCNT_ATTACH(r_p256to511);
	ENET_EVCNT_ATTACH(r_p512to1023);
	ENET_EVCNT_ATTACH(r_p1024to2047);
	ENET_EVCNT_ATTACH(r_p_gte2048);
	ENET_EVCNT_ATTACH(r_octets);
}

static void
enet_update_evcnt(struct enet_softc *sc)
{
	sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP);
	sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS);
	sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT);
	sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT);
	sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN);
	sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE);
	sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE);
	sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG);
	sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB);
	sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL);
	sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64);
	sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N);
	sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N);
	sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511);
	sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023);
	sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047);
	sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048);
	sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS);
	sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS);
	sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT);
	sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT);
	sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN);
	sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE);
	sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE);
	sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG);
	sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB);
	sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64);
	sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127);
	sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255);
	sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511);
	sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023);
	sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047);
	sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048);
	sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS);
}
#endif /* ENET_EVENT_COUNTER */

static void
enet_tick(void *arg)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;

	sc = arg;
	mii = &sc->sc_mii;
	ifp = &sc->sc_ethercom.ec_if;

	s = splnet();

	if (sc->sc_stopping)
		goto out;

#ifdef ENET_EVENT_COUNTER
	enet_update_evcnt(sc);
#endif

	/* update counters */
	ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE);
	ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG);
	ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB);

	/* clear counters */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	mii_tick(mii);
out:

	if (!sc->sc_stopping)
		callout_schedule(&sc->sc_tick_ch, ENET_TICK);

	splx(s);
}

static int
enet_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	status = ENET_REG_READ(sc, ENET_EIR);

	if (sc->sc_imxtype == 7) {
		if (status & (ENET_EIR_TXF|ENET_EIR_TXF1|ENET_EIR_TXF2))
			enet_tx_intr(arg);
		if (status & (ENET_EIR_RXF|ENET_EIR_RXF1|ENET_EIR_RXF2))
			enet_rx_intr(arg);
	} else {
		if (status & ENET_EIR_TXF)
			enet_tx_intr(arg);
		if (status & ENET_EIR_RXF)
			enet_rx_intr(arg);
	}

	if (status & ENET_EIR_EBERR) {
		device_printf(sc->sc_dev, "Ethernet Bus Error\n");
		ifp = &sc->sc_ethercom.ec_if;
		enet_stop(ifp, 1);
		enet_init(ifp);
	} else {
		ENET_REG_WRITE(sc, ENET_EIR, status);
	}

	rnd_add_uint32(&sc->sc_rnd_source, status);

	return 1;
}

static int
enet_tx_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	struct enet_txsoft *txs;
	int idx;

	sc = (struct enet_softc *)arg;
	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		txs = &sc->sc_txsoft[idx];

		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) {
			/* This TX Descriptor has not been transmitted yet */
			break;
		}

		/* txsoft is valid only on the first segment (TXFLAGS1_T1) */
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			ifp->if_opackets++;
		}

		/* check for transmit errors */
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) {
			uint32_t flags2;
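			/*
			 * The last descriptor of a frame (TXFLAGS1_L)
			 * carries the frame-wide status in tx_flags2;
			 * any of the error bits tested below counts as
			 * an output error.
			 */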

			flags2 = sc->sc_txdesc_ring[idx].tx_flags2;

			if (flags2 & (TXFLAGS2_TXE |
			    TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE |
			    TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) {
#ifdef DEBUG_ENET
				if (enet_debug) {
					char flagsbuf[128];

					snprintb(flagsbuf, sizeof(flagsbuf),
					    "\20" "\20TRANSMIT" "\16UNDERFLOW"
					    "\15COLLISION" "\14FRAME"
					    "\13LATECOLLISION" "\12OVERFLOW",
					    flags2);

					device_printf(sc->sc_dev,
					    "txdesc[%d]: transmit error: "
					    "flags2=%s\n", idx, flagsbuf);
				}
#endif /* DEBUG_ENET */
				ifp->if_oerrors++;
			}
		}

		sc->sc_tx_free++;
	}
	sc->sc_tx_considx = idx;

	if (sc->sc_tx_free > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * No more pending TX descriptor,
	 * cancel the watchdog timer.
	 */
	if (sc->sc_tx_free == ENET_TX_RING_CNT)
		ifp->if_timer = 0;

	return 1;
}

static int
enet_rx_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	struct enet_rxsoft *rxs;
	int idx, len, amount;
	uint32_t flags1, flags2;
	struct mbuf *m, *m0, *mprev;

	sc = arg;
	ifp = &sc->sc_ethercom.ec_if;

	m0 = mprev = NULL;
	amount = 0;
	for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) {

		rxs = &sc->sc_rxsoft[idx];

		RXDESC_READIN(idx);
		if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) {
			/* This RX Descriptor has not been received yet */
			break;
		}

		/*
		 * build mbuf from RX Descriptor if needed
		 */
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len;
		len = RXFLAGS1_LEN(flags1);

#define RACC_SHIFT16	2
		if (m0 == NULL) {
			m0 = m;
			m_adj(m0, RACC_SHIFT16);
			len -= RACC_SHIFT16;
			m->m_len = len;
			amount = len;
		} else {
			if (flags1 & RXFLAGS1_L)
				len = len - amount - RACC_SHIFT16;

			m->m_len = len;
			amount += len;
			if (m->m_flags & M_PKTHDR)
				m_remove_pkthdr(m);
			mprev->m_next = m;
		}
		mprev = m;

		flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

		if (flags1 & RXFLAGS1_L) {
			/* last buffer */
			if ((amount < ETHER_HDR_LEN) ||
			    ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO |
			    RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) ||
			    (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE |
			    RXFLAGS2_CE)))) {

#ifdef DEBUG_ENET
				if (enet_debug) {
					char flags1buf[128], flags2buf[128];
					snprintb(flags1buf, sizeof(flags1buf),
					    "\20" "\31MISS" "\26LENGTHVIOLATION"
					    "\25NONOCTET" "\23CRC" "\22OVERRUN"
					    "\21TRUNCATED", flags1);
					snprintb(flags2buf, sizeof(flags2buf),
					    "\20" "\40MAC" "\33PHY"
					    "\32COLLISION", flags2);

					DEVICE_DPRINTF(
					    "rxdesc[%d]: receive error: "
					    "flags1=%s,flags2=%s,len=%d\n",
					    idx, flags1buf, flags2buf, amount);
				}
#endif /* DEBUG_ENET */
				ifp->if_ierrors++;
				m_freem(m0);

			} else {
				/* packet receive ok */
				m_set_rcvif(m0, ifp);
				m0->m_pkthdr.len = amount;

				bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);

				if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 |
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6))
					enet_rx_csum(sc, ifp, m0, idx);

				if_percpuq_enqueue(ifp->if_percpuq, m0);
			}

			m0 = NULL;
			mprev = NULL;
			amount = 0;

		} else {
			/* continued from previous buffer */
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		}
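		/*
		 * The mbuf for this slot has been handed up the stack
		 * (or freed on error), so unload the map and attach a
		 * fresh cluster to the descriptor.
		 */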

		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
		if (enet_alloc_rxbuf(sc, idx) != 0) {
			panic("enet_alloc_rxbuf NULL\n");
		}
	}
	sc->sc_rx_readidx = idx;

	/* re-enable RX DMA to make sure */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	return 1;
}

static void
enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx)
{
	uint32_t flags2;
	uint8_t proto;

	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

	if (flags2 & RXFLAGS2_IPV6) {
		proto = sc->sc_rxdesc_ring[idx].rx_proto;

		/* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */
		if ((proto == IPPROTO_TCP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		else if ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is this an IPv4 packet? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ifnet *ifp;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	ifp = &sc->sc_ethercom.ec_if;

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packets */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0] = paddr >> 24;
	hwaddr[1] = paddr >> 16;
	hwaddr[2] = paddr >> 8;
	hwaddr[3] = paddr;

	paddr = ENET_REG_READ(sc, ENET_PAUR);
	hwaddr[4] = paddr >> 24;
	hwaddr[5] = paddr >> 16;
}

static void
enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) |
	    hwaddr[3];
	ENET_REG_WRITE(sc, ENET_PALR, paddr);
	paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16);
	ENET_REG_WRITE(sc, ENET_PAUR, paddr);
}

/*
 * ifnet interfaces
 */
static int
enet_init(struct ifnet *ifp)
{
	struct enet_softc *sc;
	int s, error;

	sc = ifp->if_softc;

	s = splnet();

	enet_init_regs(sc, 0);
	enet_init_txring(sc);
	error = enet_init_rxring(sc);
	if (error != 0) {
		enet_drain_rxbuf(sc);
		device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n");
		goto init_failure;
	}

	/* reload mac address */
	memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
	enet_sethwaddr(sc, sc->sc_enaddr);

	/* program multicast address */
	enet_setmulti(sc);

	/* update if_flags */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* update local copy of if_flags */
	sc->sc_if_flags = ifp->if_flags;

	/* mii */
	mii_mediachg(&sc->sc_mii);

	/* enable RX DMA */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	sc->sc_stopping = false;
	callout_schedule(&sc->sc_tick_ch, ENET_TICK);

init_failure:
	splx(s);

	return error;
}

static void
enet_start(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mbuf *m;
	int npkt;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	sc = ifp->if_softc;
	for (npkt = 0; ; npkt++) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->sc_tx_free <= 0) {
			/* no tx descriptor now... */
			ifp->if_flags |= IFF_OACTIVE;
			DEVICE_DPRINTF("TX descriptor is full\n");
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		if (enet_encap_txring(sc, &m) != 0) {
			/* too many mbuf chains? */
			ifp->if_flags |= IFF_OACTIVE;
			DEVICE_DPRINTF(
			    "TX descriptor is full. dropping packet\n");
			m_freem(m);
			ifp->if_oerrors++;
			break;
		}

		/* Pass the packet to any BPF listeners */
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (npkt) {
		/* enable TX DMA */
		ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE);

		ifp->if_timer = 5;
	}
}

static void
enet_stop(struct ifnet *ifp, int disable)
{
	struct enet_softc *sc;
	int s;
	uint32_t v;

	sc = ifp->if_softc;

	s = splnet();

	sc->sc_stopping = true;
	callout_stop(&sc->sc_tick_ch);

	/* clear ENET_ECR[ETHEREN] to abort receive and transmit */
	v = ENET_REG_READ(sc, ENET_ECR);
	ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable) {
		enet_drain_txbuf(sc);
		enet_drain_rxbuf(sc);
	}

	splx(s);
}

static void
enet_watchdog(struct ifnet *ifp)
{
	struct enet_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();

	device_printf(sc->sc_dev, "watchdog timeout\n");
	ifp->if_oerrors++;

	/* salvage packets left in descriptors */
	enet_tx_intr(sc);
	enet_rx_intr(sc);

	/* reset */
	enet_stop(ifp, 1);
	enet_init(ifp);

	splx(s);
}

static void
enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct enet_softc *sc = ifp->if_softc;

	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
	    | sc->sc_flowflags;
}

static int
enet_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct enet_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
		return 0;

	enet_setmulti(sc);

	sc->sc_if_flags = ifp->if_flags;
	return 0;
}

static int
enet_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct enet_softc *sc;
	struct ifreq *ifr;
	int s, error;
	uint32_t v;

	sc = ifp->if_softc;
	ifr = data;

	error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFMTU:
		if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;

			/* set maximum frame length */
			v = MTU2FRAMESIZE(ifr->ifr_mtu);
			ENET_REG_WRITE(sc, ENET_FTRL, v);
			v = ENET_REG_READ(sc, ENET_RCR);
			v &= ~ENET_RCR_MAX_FL(0x3fff);
			v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN +
			    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
			ENET_REG_WRITE(sc, ENET_RCR, v);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error != ENETRESET)
			break;

		/* post-process */
		error = 0;
		switch (command) {
		case SIOCSIFCAP:
			error = (*ifp->if_init)(ifp);
			break;
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (ifp->if_flags & IFF_RUNNING)
				enet_setmulti(sc);
			break;
		}
		break;
	}

	splx(s);

	return error;
}

/*
 * for MII
 */
static int
enet_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct enet_softc *sc;
	int timeout;
	uint32_t status;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* read command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		status = ENET_REG_READ(sc, ENET_EIR);
		if (status & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n",
		    reg);
		return ETIMEDOUT;
	} else
		*val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK;

	return 0;
}

static int
enet_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct enet_softc *sc;
	int timeout;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* write command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) |
	    (ENET_MMFR_DATAMASK & val));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
enet_miibus_statchg(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	uint32_t ecr, ecr0;
	uint32_t rcr, rcr0;
	uint32_t tcr, tcr0;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/* get current status */
	ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET;
	rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR);
	tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR);

	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
		tcr |= ENET_TCR_FDEN;		/* full duplex */
		rcr &= ~ENET_RCR_DRT;		/* enable receive on transmit */
	} else {
		tcr &= ~ENET_TCR_FDEN;		/* half duplex */
		rcr |= ENET_RCR_DRT;		/* disable receive on transmit */
	}

	if ((tcr ^ tcr0) & ENET_TCR_FDEN) {
		/*
		 * need to reset because
		 * FDEN can change when ECR[ETHEREN] is 0
		 */
		enet_init_regs(sc, 0);
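		/*
		 * enet_init_regs() reprograms ECR/RCR/TCR from the current
		 * media and flow-control state, so nothing more to do here.
		 */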
		return;
	}

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
	case IFM_1000_T:
		ecr |= ENET_ECR_SPEED;		/* 1000Mbps mode */
		rcr &= ~ENET_RCR_RMII_10T;
		break;
	case IFM_100_TX:
		ecr &= ~ENET_ECR_SPEED;		/* 100Mbps mode */
		rcr &= ~ENET_RCR_RMII_10T;	/* 100Mbps mode */
		break;
	case IFM_10_T:
		ecr &= ~ENET_ECR_SPEED;		/* 10Mbps mode */
		rcr |= ENET_RCR_RMII_10T;	/* 10Mbps mode */
		break;
	default:
		ecr = ecr0;
		rcr = rcr0;
		tcr = tcr0;
		break;
	}

	if (sc->sc_rgmii == 0)
		ecr &= ~ENET_ECR_SPEED;

	if (sc->sc_flowflags & IFM_FLOW)
		rcr |= ENET_RCR_FCE;
	else
		rcr &= ~ENET_RCR_FCE;

	/* update registers only if they changed */
	if (ecr != ecr0)
		ENET_REG_WRITE(sc, ENET_ECR, ecr);
	if (rcr != rcr0)
		ENET_REG_WRITE(sc, ENET_RCR, rcr);
	if (tcr != tcr0)
		ENET_REG_WRITE(sc, ENET_TCR, tcr);
}

/*
 * handling descriptors
 */
static void
enet_init_txring(struct enet_softc *sc)
{
	int i;

	/* build TX ring */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		sc->sc_txdesc_ring[i].tx_flags1_len =
		    ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0);
		sc->sc_txdesc_ring[i].tx_databuf = 0;
		sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT;
		sc->sc_txdesc_ring[i].tx__reserved1 = 0;
		sc->sc_txdesc_ring[i].tx_flags3 = 0;
		sc->sc_txdesc_ring[i].tx_1588timestamp = 0;
		sc->sc_txdesc_ring[i].tx__reserved2 = 0;
		sc->sc_txdesc_ring[i].tx__reserved3 = 0;

		TXDESC_WRITEOUT(i);
	}

	sc->sc_tx_free = ENET_TX_RING_CNT;
	sc->sc_tx_considx = 0;
	sc->sc_tx_prodidx = 0;
}

static int
enet_init_rxring(struct enet_softc *sc)
{
	int i, error;

	/* build RX ring */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = enet_alloc_rxbuf(sc, i);
		if (error != 0)
			return error;
	}

	sc->sc_rx_readidx = 0;

	return 0;
}

static int
enet_alloc_rxbuf(struct enet_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT));

	/* free mbuf if already allocated */
	if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap);
		m_freem(sc->sc_rxsoft[idx].rxs_mbuf);
		sc->sc_rxsoft[idx].rxs_mbuf = NULL;
	}

	/* allocate new mbuf cluster */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}
	m->m_len = MCLBYTES;
	m->m_next = NULL;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0,
	    sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_rxsoft[idx].rxs_mbuf = m;
	enet_reset_rxdesc(sc, idx);
	return 0;
}

static void
enet_reset_rxdesc(struct enet_softc *sc, int idx)
{
	uint32_t paddr;

	paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr;

	sc->sc_rxdesc_ring[idx].rx_flags1_len =
	    RXFLAGS1_E |
	    ((idx == (ENET_RX_RING_CNT - 1)) ? RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only for the first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			ifp->if_oerrors++;
		}
		sc->sc_tx_free++;
	}
}

static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * A TX descriptor chain must be able to hold a whole mbuf chain,
	 * so create dmamaps with up to ENET_MAX_PKT_NSEGS segments.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * Each RX descriptor holds one mbuf cluster,
	 * so create a single-segment dmamap per descriptor.
	 */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_2;
		}
	}

	if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT,
	    (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_txdesc_ring, 0,
	    sizeof(struct enet_txdesc) * ENET_TX_RING_CNT);

	if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT,
	    (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_rxdesc_ring, 0,
	    sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT);

	return 0;

fail_2:
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_1:
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	return error;
}

static int
enet_encap_mbufalign(struct mbuf **mp)
{
	struct mbuf *m, *m0, *mt, *p, *x;
	void *ap;
	uint32_t alignoff, chiplen;

	/*
	 * The i.MX6 SoC ethernet controller requires that buffer
	 * addresses be 8-byte aligned and that each buffer be
	 * longer than 10 bytes (first fragment only?).
	 */
#define ALIGNBYTE	8
#define MINBUFSIZE	10
#define ALIGN_PTR(p, align)	\
	(void *)(((uintptr_t)(p) + ((align) - 1)) & -(align))

	m0 = *mp;
	mt = p = NULL;
	for (m = m0; m != NULL; m = m->m_next) {
		alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1);
		if (m->m_len < (ALIGNBYTE * 2)) {
			/*
			 * rearrange the mbuf so that its data is aligned
			 *
			 * align 8       *       *       *       *       *
			 *       +0123456789abcdef0123456789abcdef0
			 * FROM  m->m_data[___________abcdefghijklmn_______]
			 *
			 *       +0123456789abcdef0123456789abcdef0
			 * TO    m->m_data[________abcdefghijklm___________] or
			 *       m->m_data[________________abcdefghijklmn__]
			 */
			if ((alignoff != 0) && (m->m_len != 0)) {
				chiplen = ALIGNBYTE - alignoff;
				if (M_LEADINGSPACE(m) >= alignoff) {
					ap = m->m_data - alignoff;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else if (M_TRAILINGSPACE(m) >= chiplen) {
					ap = m->m_data + chiplen;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else {
					/*
					 * no space to align data. (M_READONLY?)
					 * allocate a new, aligned mbuf
					 * and copy to it.
					 */
					MGET(x, M_DONTWAIT, m->m_type);
					if (x == NULL) {
						m_freem(m);
						return ENOBUFS;
					}
					MCLAIM(x, m->m_owner);
					if (m->m_flags & M_PKTHDR)
						m_move_pkthdr(x, m);
					x->m_len = m->m_len;
					x->m_data = ALIGN_PTR(x->m_data,
					    ALIGNBYTE);
					memcpy(mtod(x, void *), mtod(m, void *),
					    m->m_len);
					p->m_next = x;
					x->m_next = m_free(m);
					m = x;
				}
			}

			/*
			 * make the 1st mbuf at least 10 bytes long
			 *
			 * align 8       *       *       *       *       *
			 *       +0123456789abcdef0123456789abcdef0
			 * FROM  m->m_data[________abcde___________________]
			 *       m->m_data[__fg____________________________]
			 *       m->m_data[_________________hi_____________]
			 *       m->m_data[__________jk____________________]
			 *       m->m_data[____l___________________________]
			 *
			 *       +0123456789abcdef0123456789abcdef0
			 * TO    m->m_data[________abcdefghij______________]
			 *       m->m_data[________________________________]
			 *       m->m_data[________________________________]
			 *       m->m_data[___________k____________________]
			 *       m->m_data[____l___________________________]
			 */
			if (mt == NULL) {
				mt = m;
				while (mt->m_len == 0) {
					mt = mt->m_next;
					if (mt == NULL) {
						m_freem(m);
						return ENOBUFS;
					}
				}

				/* mt = 1st mbuf, x = 2nd mbuf */
				x = mt->m_next;
				while (mt->m_len < MINBUFSIZE) {
					if (x == NULL) {
						m_freem(m);
						return ENOBUFS;
					}

					alignoff = (uintptr_t)x->m_data &
					    (ALIGNBYTE - 1);
					chiplen = ALIGNBYTE - alignoff;
					if (chiplen > x->m_len) {
						chiplen = x->m_len;
					} else if ((mt->m_len + chiplen) <
					    MINBUFSIZE) {
						/*
						 * next mbuf should be greater
						 * than ALIGNBYTE?
						 */
						if (x->m_len >= (chiplen +
						    ALIGNBYTE * 2))
							chiplen += ALIGNBYTE;
						else
							chiplen = x->m_len;
					}

					if (chiplen &&
					    (M_TRAILINGSPACE(mt) < chiplen)) {
						/*
						 * move data to the beginning
						 * of the mbuf storage
						 * (aligned) to enlarge the
						 * trailing space
						 */
						if (mt->m_flags & M_EXT) {
							ap = mt->m_ext.ext_buf;
						} else if (mt->m_flags &
						    M_PKTHDR) {
							ap = mt->m_pktdat;
						} else {
							ap = mt->m_dat;
						}
						ap = ALIGN_PTR(ap, ALIGNBYTE);
						memcpy(ap, mt->m_data,
						    mt->m_len);
						mt->m_data = ap;
					}

					if (chiplen &&
					    (M_TRAILINGSPACE(mt) >= chiplen)) {
						memcpy(mt->m_data + mt->m_len,
						    x->m_data, chiplen);
						mt->m_len += chiplen;
						m_adj(x, chiplen);
					}

					x = x->m_next;
				}
			}

		} else {
			mt = m;

			/*
			 * allocate a new mbuf x, and rearrange as below;
			 *
			 * align 8       *       *       *       *       *
			 *       +0123456789abcdef0123456789abcdef0
			 * FROM  m->m_data[____________abcdefghijklmnopq___]
			 *
			 *       +0123456789abcdef0123456789abcdef0
			 * TO    x->m_data[________abcdefghijkl____________]
			 *       m->m_data[________________________mnopq___]
			 *
			 */
			if (alignoff != 0) {
				/* at least ALIGNBYTE */
				chiplen = ALIGNBYTE - alignoff + ALIGNBYTE;

				MGET(x, M_DONTWAIT, m->m_type);
				if (x == NULL) {
					m_freem(m);
					return ENOBUFS;
				}
				MCLAIM(x, m->m_owner);
				if (m->m_flags & M_PKTHDR)
					m_move_pkthdr(x, m);
				x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE);
				memcpy(mtod(x, void *), mtod(m, void *),
				    chiplen);
				x->m_len = chiplen;
				x->m_next = m;
				m_adj(m, chiplen);

				if (p == NULL)
					m0 = x;
				else
					p->m_next = x;
			}
		}
		p = m;
	}
	*mp = m0;

	return 0;
}

static int
enet_encap_txring(struct enet_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	struct mbuf *m;
	int csumflags, idx, i, error;
	uint32_t flags1, flags2;

	idx = sc->sc_tx_prodidx;
	map = sc->sc_txsoft[idx].txs_dmamap;

	/* align mbuf data to satisfy the ENET alignment requirements */
	error = enet_encap_mbufalign(mp);
	if (error != 0)
		return error;

	m = *mp;
	csumflags = m->m_pkthdr.csum_flags;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "Error mapping mbuf into TX chain: error=%d\n", error);
		m_freem(m);
		return error;
	}

	if (map->dm_nsegs > sc->sc_tx_free) {
		bus_dmamap_unload(sc->sc_dmat, map);
		device_printf(sc->sc_dev,
		    "too many mbuf chain %d\n", map->dm_nsegs);
		m_freem(m);
		return ENOBUFS;
	}

	/* zero the protocol checksum field beforehand */
	if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 |
	    M_CSUM_UDPv6 | M_CSUM_TCPv6)) {
		int ehlen;
		uint16_t etype;

		m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype);
		switch (ntohs(etype)) {
		case ETHERTYPE_IP:
		case ETHERTYPE_IPV6:
			ehlen = ETHER_HDR_LEN;
			break;
		case ETHERTYPE_VLAN:
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			break;
		default:
			ehlen = 0;
			break;
		}

		if (ehlen) {
			const int off =
			    M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) +
			    M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
			if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) {
				uint16_t zero = 0;
				m_copyback(m, ehlen + off, sizeof(zero), &zero);
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < map->dm_nsegs; i++) {
		flags1 = TXFLAGS1_R;
		flags2 = 0;

		if (i == 0) {
			flags1 |= TXFLAGS1_T1;	/* mark as first segment */
			sc->sc_txsoft[idx].txs_mbuf = m;
		}

		/* checksum offloading */
		if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 |
		    M_CSUM_UDPv6 | M_CSUM_TCPv6))
			flags2 |= TXFLAGS2_PINS;
		if (csumflags & (M_CSUM_IPv4))
			flags2 |= TXFLAGS2_IINS;

		if (i == map->dm_nsegs - 1) {
			/* mark last segment */
			flags1 |= TXFLAGS1_L | TXFLAGS1_TC;
			flags2 |= TXFLAGS2_INT;
		}
		if (idx == ENET_TX_RING_CNT - 1) {
			/* mark end of ring */
			flags1 |= TXFLAGS1_W;
		}

		sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr;
		sc->sc_txdesc_ring[idx].tx_flags2 = flags2;
		sc->sc_txdesc_ring[idx].tx_flags3 = 0;
		TXDESC_WRITEOUT(idx);

		sc->sc_txdesc_ring[idx].tx_flags1_len =
		    flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len);
		TXDESC_WRITEOUT(idx);

		idx = ENET_TX_NEXTIDX(idx);
		sc->sc_tx_free--;
	}

	sc->sc_tx_prodidx = idx;

	return 0;
}

/*
 * device initialization
 */
static int
enet_init_regs(struct enet_softc *sc, int init)
{
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	paddr_t paddr;
	uint32_t val;
	int miimode, fulldup, ecr_speed, rcr_speed, flowctrl;

	if (init) {
		fulldup = 1;
		ecr_speed = ENET_ECR_SPEED;
		rcr_speed = 0;
		flowctrl = 0;
	} else {
		mii = &sc->sc_mii;
		ife = mii->mii_media.ifm_cur;

		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
			fulldup = 1;
		else
			fulldup = 0;

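		/*
		 * Map the selected media onto the ECR speed bit (gigabit)
		 * and the RCR 10Mbps bit; 100Mbps uses neither.
		 */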
		switch (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			ecr_speed = 0;
			rcr_speed = ENET_RCR_RMII_10T;
			break;
		case IFM_100_TX:
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	if (sc->sc_rgmii == 0)
		ecr_speed = 0;

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupts */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable MIB register */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/* MII speed setup. MDCclk(=2.5MHz) = ENET_PLL/((val+1)*2) */
	val = ((sc->sc_pllclock) / 500000 - 1) / 10;
	ENET_REG_WRITE(sc, ENET_MSCR, val << 1);

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksum is off by default (overridden per TX descriptor) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align the ethernet payload on a 32bit boundary, discard frames
	 * with MAC-layer errors, and don't discard frames with checksum
	 * errors
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */

	if (sc->sc_rgmii == 0)
		miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE;
	else
		miimode = ENET_RCR_RGMII_EN;
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* remove RX frame padding */
	    miimode |
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
	    rcr_speed |
	    (fulldup ? 0 : ENET_RCR_DRT) |
	    ENET_RCR_MAX_FL(val));

	/* Maximum Receive BufSize per one descriptor */
	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);


	/* TX/RX Descriptor Physical Address */
	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
	/* sync cache */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* enable interrupts */
	val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR;
	if (sc->sc_imxtype == 7)
		val |= ENET_EIR_TXF2|ENET_EIR_RXF2|ENET_EIR_TXF1|ENET_EIR_RXF1;
	ENET_REG_WRITE(sc, ENET_EIMR, val);

	/* enable ether */
	ENET_REG_WRITE(sc, ENET_ECR,
#if _BYTE_ORDER == _LITTLE_ENDIAN
	    ENET_ECR_DBSWP |
#endif
	    ecr_speed |
	    ENET_ECR_EN1588 |		/* use enhanced TX/RX descriptor */
	    ENET_ECR_ETHEREN);		/* Ethernet Enable */

	return 0;
}

static int
enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp,
    bus_dmamap_t *mapp)
{
	bus_dma_segment_t seglist[1];
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist,
	    1, &nsegs, M_NOWAIT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate DMA buffer, error=%d\n", error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to map DMA buffer, error=%d\n",
		    error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create DMA map, error=%d\n", error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load DMA map, error=%d\n", error);
		goto fail_load;
	}

	return 0;

fail_load:
	bus_dmamap_destroy(sc->sc_dmat, *mapp);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, *addrp, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, seglist, 1);
fail_alloc:
	return error;
}