/*	$NetBSD: if_enet.c,v 1.11 2017/06/09 18:14:59 ryo Exp $	*/

/*
 * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.11 2017/06/09 18:14:59 ryo Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/rndsource.h>

#include <lib/libkern/libkern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/imx/if_enetreg.h>
#include <arm/imx/if_enetvar.h>

#undef DEBUG_ENET
#undef ENET_EVENT_COUNTER

#define ENET_TICK	hz

#ifdef DEBUG_ENET
int enet_debug = 0;
# define DEVICE_DPRINTF(args...)	\
	do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0)
#else
# define DEVICE_DPRINTF(args...)
#endif


#define RXDESC_MAXBUFSIZE	0x07f0
		/* ENET does not work greater than 0x0800... */

#undef ENET_SUPPORT_JUMBO	/* JUMBO FRAME SUPPORT is unstable */
#ifdef ENET_SUPPORT_JUMBO
# define ENET_MAX_PKT_LEN	4034	/* MAX FIFO LEN */
#else
# define ENET_MAX_PKT_LEN	1522
#endif
#define ENET_DEFAULT_PKT_LEN	1522	/* including VLAN tag */
#define MTU2FRAMESIZE(n)	\
	((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN)


#define ENET_MAX_PKT_NSEGS	64

#define ENET_TX_NEXTIDX(idx)	\
	(((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1))
#define ENET_RX_NEXTIDX(idx)	\
	(((idx) >= (ENET_RX_RING_CNT - 1)) ?
0 : ((idx) + 1)) 99 100 #define TXDESC_WRITEOUT(idx) \ 101 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 sizeof(struct enet_txdesc) * (idx), \ 103 sizeof(struct enet_txdesc), \ 104 BUS_DMASYNC_PREWRITE) 105 106 #define TXDESC_READIN(idx) \ 107 bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 sizeof(struct enet_txdesc) * (idx), \ 109 sizeof(struct enet_txdesc), \ 110 BUS_DMASYNC_PREREAD) 111 112 #define RXDESC_WRITEOUT(idx) \ 113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 sizeof(struct enet_rxdesc) * (idx), \ 115 sizeof(struct enet_rxdesc), \ 116 BUS_DMASYNC_PREWRITE) 117 118 #define RXDESC_READIN(idx) \ 119 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 sizeof(struct enet_rxdesc) * (idx), \ 121 sizeof(struct enet_rxdesc), \ 122 BUS_DMASYNC_PREREAD) 123 124 #define ENET_REG_READ(sc, reg) \ 125 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 127 #define ENET_REG_WRITE(sc, reg, value) \ 128 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 130 #ifdef ENET_EVENT_COUNTER 131 static void enet_attach_evcnt(struct enet_softc *); 132 static void enet_update_evcnt(struct enet_softc *); 133 #endif 134 135 static int enet_intr(void *); 136 static void enet_tick(void *); 137 static int enet_tx_intr(void *); 138 static int enet_rx_intr(void *); 139 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 140 int); 141 142 static void enet_start(struct ifnet *); 143 static int enet_ifflags_cb(struct ethercom *); 144 static int enet_ioctl(struct ifnet *, u_long, void *); 145 static int enet_init(struct ifnet *); 146 static void enet_stop(struct ifnet *, int); 147 static void enet_watchdog(struct ifnet *); 148 static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 149 150 static int enet_miibus_readreg(device_t, int, int); 151 static void enet_miibus_writereg(device_t, int, int, int); 152 static void enet_miibus_statchg(struct ifnet *); 153 154 static void enet_gethwaddr(struct enet_softc *, uint8_t *); 155 static void enet_sethwaddr(struct enet_softc *, uint8_t *); 156 static void enet_setmulti(struct enet_softc *); 157 static int enet_encap_mbufalign(struct mbuf **); 158 static int enet_encap_txring(struct enet_softc *, struct mbuf **); 159 static int enet_init_regs(struct enet_softc *, int); 160 static int enet_alloc_ring(struct enet_softc *); 161 static void enet_init_txring(struct enet_softc *); 162 static int enet_init_rxring(struct enet_softc *); 163 static void enet_reset_rxdesc(struct enet_softc *, int); 164 static int enet_alloc_rxbuf(struct enet_softc *, int); 165 static void enet_drain_txbuf(struct enet_softc *); 166 static void enet_drain_rxbuf(struct enet_softc *); 167 static int enet_alloc_dma(struct enet_softc *, size_t, void **, 168 bus_dmamap_t *); 169 170 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc), 171 enet_match, enet_attach, NULL, NULL); 172 173 void 174 enet_attach_common(device_t self, bus_space_tag_t iot, 175 bus_dma_tag_t dmat, bus_addr_t addr, bus_size_t size, int irq) 176 { 177 struct enet_softc *sc; 178 struct ifnet *ifp; 179 180 sc = device_private(self); 181 sc->sc_dev = self; 182 sc->sc_iot = iot; 183 sc->sc_addr = addr; 184 sc->sc_dmat = dmat; 185 186 aprint_naive("\n"); 187 aprint_normal(": Gigabit Ethernet Controller\n"); 188 if (bus_space_map(sc->sc_iot, sc->sc_addr, size, 0, 189 &sc->sc_ioh)) { 190 aprint_error_dev(self, "cannot map registers\n"); 191 return; 192 } 193 194 /* allocate dma buffer */ 195 if (enet_alloc_ring(sc)) 196 return; 197 198 #define 
IS_ENADDR_ZERO(enaddr)	\
	((enaddr[0] | enaddr[1] | enaddr[2] |	\
	  enaddr[3] | enaddr[4] | enaddr[5]) == 0)

	if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
		/* by any chance, mac-address is already set by bootloader? */
		enet_gethwaddr(sc, sc->sc_enaddr);
		if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
			/* give up. set randomly */
			uint32_t eaddr = random();
			/* not multicast */
			sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc;
			sc->sc_enaddr[1] = eaddr >> 16;
			sc->sc_enaddr[2] = eaddr >> 8;
			sc->sc_enaddr[3] = eaddr;
			eaddr = random();
			sc->sc_enaddr[4] = eaddr >> 8;
			sc->sc_enaddr[5] = eaddr;

			aprint_error_dev(self,
			    "cannot get mac address. set randomly\n");
		}
	}
	enet_sethwaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	enet_init_regs(sc, 1);

	/* setup interrupt handlers */
	if ((sc->sc_ih = intr_establish(irq, IPL_NET,
	    IST_LEVEL, enet_intr, sc)) == NULL) {
		aprint_error_dev(self, "unable to establish interrupt\n");
		goto failure;
	}

	if (sc->sc_imxtype == 7) {
		/* i.MX7 uses 3 interrupts */
		if ((sc->sc_ih2 = intr_establish(irq + 1, IPL_NET,
		    IST_LEVEL, enet_intr, sc)) == NULL) {
			aprint_error_dev(self,
			    "unable to establish 2nd interrupt\n");
			intr_disestablish(sc->sc_ih);
			goto failure;
		}
		if ((sc->sc_ih3 = intr_establish(irq + 2, IPL_NET,
		    IST_LEVEL, enet_intr, sc)) == NULL) {
			aprint_error_dev(self,
			    "unable to establish 3rd interrupt\n");
			intr_disestablish(sc->sc_ih2);
			intr_disestablish(sc->sc_ih);
			goto failure;
		}
	}

	/* callout will be scheduled from enet_init() */
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, enet_tick, sc);

	/* setup ifp */
	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = enet_ioctl;
	ifp->if_start = enet_start;
	ifp->if_init = enet_init;
	ifp->if_stop = enet_stop;
	ifp->if_watchdog = enet_watchdog;

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
#ifdef ENET_SUPPORT_JUMBO
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
#endif

	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	IFQ_SET_MAXLEN(&ifp->if_snd, max(ENET_TX_RING_CNT, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* setup MII */
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = enet_miibus_readreg;
	sc->sc_mii.mii_writereg = enet_miibus_writereg;
	sc->sc_mii.mii_statchg = enet_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    enet_mediastatus);

	/* try to attach PHY */
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp,
sc->sc_enaddr); 307 ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb); 308 309 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 310 RND_TYPE_NET, RND_FLAG_DEFAULT); 311 312 #ifdef ENET_EVENT_COUNTER 313 enet_attach_evcnt(sc); 314 #endif 315 316 sc->sc_stopping = false; 317 318 return; 319 320 failure: 321 bus_space_unmap(sc->sc_iot, sc->sc_ioh, size); 322 return; 323 } 324 325 #ifdef ENET_EVENT_COUNTER 326 static void 327 enet_attach_evcnt(struct enet_softc *sc) 328 { 329 const char *xname; 330 331 xname = device_xname(sc->sc_dev); 332 333 #define ENET_EVCNT_ATTACH(name) \ 334 evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC, \ 335 NULL, xname, #name); 336 337 ENET_EVCNT_ATTACH(t_drop); 338 ENET_EVCNT_ATTACH(t_packets); 339 ENET_EVCNT_ATTACH(t_bc_pkt); 340 ENET_EVCNT_ATTACH(t_mc_pkt); 341 ENET_EVCNT_ATTACH(t_crc_align); 342 ENET_EVCNT_ATTACH(t_undersize); 343 ENET_EVCNT_ATTACH(t_oversize); 344 ENET_EVCNT_ATTACH(t_frag); 345 ENET_EVCNT_ATTACH(t_jab); 346 ENET_EVCNT_ATTACH(t_col); 347 ENET_EVCNT_ATTACH(t_p64); 348 ENET_EVCNT_ATTACH(t_p65to127n); 349 ENET_EVCNT_ATTACH(t_p128to255n); 350 ENET_EVCNT_ATTACH(t_p256to511); 351 ENET_EVCNT_ATTACH(t_p512to1023); 352 ENET_EVCNT_ATTACH(t_p1024to2047); 353 ENET_EVCNT_ATTACH(t_p_gte2048); 354 ENET_EVCNT_ATTACH(t_octets); 355 ENET_EVCNT_ATTACH(r_packets); 356 ENET_EVCNT_ATTACH(r_bc_pkt); 357 ENET_EVCNT_ATTACH(r_mc_pkt); 358 ENET_EVCNT_ATTACH(r_crc_align); 359 ENET_EVCNT_ATTACH(r_undersize); 360 ENET_EVCNT_ATTACH(r_oversize); 361 ENET_EVCNT_ATTACH(r_frag); 362 ENET_EVCNT_ATTACH(r_jab); 363 ENET_EVCNT_ATTACH(r_p64); 364 ENET_EVCNT_ATTACH(r_p65to127); 365 ENET_EVCNT_ATTACH(r_p128to255); 366 ENET_EVCNT_ATTACH(r_p256to511); 367 ENET_EVCNT_ATTACH(r_p512to1023); 368 ENET_EVCNT_ATTACH(r_p1024to2047); 369 ENET_EVCNT_ATTACH(r_p_gte2048); 370 ENET_EVCNT_ATTACH(r_octets); 371 } 372 373 static void 374 enet_update_evcnt(struct enet_softc *sc) 375 { 376 sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP); 377 sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS); 378 sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT); 379 sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT); 380 sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN); 381 sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE); 382 sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE); 383 sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG); 384 sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB); 385 sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL); 386 sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64); 387 sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N); 388 sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N); 389 sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511); 390 sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023); 391 sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047); 392 sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048); 393 sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS); 394 sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS); 395 sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT); 396 sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT); 397 
sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN); 398 sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 399 sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE); 400 sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 401 sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB); 402 sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64); 403 sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127); 404 sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255); 405 sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511); 406 sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023); 407 sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047); 408 sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048); 409 sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS); 410 } 411 #endif /* ENET_EVENT_COUNTER */ 412 413 static void 414 enet_tick(void *arg) 415 { 416 struct enet_softc *sc; 417 struct mii_data *mii; 418 struct ifnet *ifp; 419 int s; 420 421 sc = arg; 422 mii = &sc->sc_mii; 423 ifp = &sc->sc_ethercom.ec_if; 424 425 s = splnet(); 426 427 if (sc->sc_stopping) 428 goto out; 429 430 #ifdef ENET_EVENT_COUNTER 431 enet_update_evcnt(sc); 432 #endif 433 434 /* update counters */ 435 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE); 436 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG); 437 ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB); 438 439 /* clear counters */ 440 ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR); 441 ENET_REG_WRITE(sc, ENET_MIBC, 0); 442 443 mii_tick(mii); 444 out: 445 446 if (!sc->sc_stopping) 447 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 448 449 splx(s); 450 } 451 452 static int 453 enet_intr(void *arg) 454 { 455 struct enet_softc *sc; 456 struct ifnet *ifp; 457 uint32_t status; 458 459 sc = arg; 460 status = ENET_REG_READ(sc, ENET_EIR); 461 462 if (sc->sc_imxtype == 7) { 463 if (status & (ENET_EIR_TXF|ENET_EIR_TXF1|ENET_EIR_TXF2)) 464 enet_tx_intr(arg); 465 if (status & (ENET_EIR_RXF|ENET_EIR_RXF1|ENET_EIR_RXF2)) 466 enet_rx_intr(arg); 467 } else { 468 if (status & ENET_EIR_TXF) 469 enet_tx_intr(arg); 470 if (status & ENET_EIR_RXF) 471 enet_rx_intr(arg); 472 } 473 474 if (status & ENET_EIR_EBERR) { 475 device_printf(sc->sc_dev, "Ethernet Bus Error\n"); 476 ifp = &sc->sc_ethercom.ec_if; 477 enet_stop(ifp, 1); 478 enet_init(ifp); 479 } else { 480 ENET_REG_WRITE(sc, ENET_EIR, status); 481 } 482 483 rnd_add_uint32(&sc->sc_rnd_source, status); 484 485 return 1; 486 } 487 488 static int 489 enet_tx_intr(void *arg) 490 { 491 struct enet_softc *sc; 492 struct ifnet *ifp; 493 struct enet_txsoft *txs; 494 int idx; 495 496 sc = (struct enet_softc *)arg; 497 ifp = &sc->sc_ethercom.ec_if; 498 499 for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx; 500 idx = ENET_TX_NEXTIDX(idx)) { 501 502 txs = &sc->sc_txsoft[idx]; 503 504 TXDESC_READIN(idx); 505 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) { 506 /* This TX Descriptor has not been transmitted yet */ 507 break; 508 } 509 510 /* txsoft is available on first segment (TXFLAGS1_T1) */ 511 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) { 512 bus_dmamap_unload(sc->sc_dmat, 513 txs->txs_dmamap); 514 m_freem(txs->txs_mbuf); 515 ifp->if_opackets++; 516 } 517 518 /* checking error */ 519 if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) { 520 uint32_t flags2; 
521 522 flags2 = sc->sc_txdesc_ring[idx].tx_flags2; 523 524 if (flags2 & (TXFLAGS2_TXE | 525 TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE | 526 TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) { 527 #ifdef DEBUG_ENET 528 if (enet_debug) { 529 char flagsbuf[128]; 530 531 snprintb(flagsbuf, sizeof(flagsbuf), 532 "\20" "\20TRANSMIT" "\16UNDERFLOW" 533 "\15COLLISION" "\14FRAME" 534 "\13LATECOLLISION" "\12OVERFLOW", 535 flags2); 536 537 device_printf(sc->sc_dev, 538 "txdesc[%d]: transmit error: " 539 "flags2=%s\n", idx, flagsbuf); 540 } 541 #endif /* DEBUG_ENET */ 542 ifp->if_oerrors++; 543 } 544 } 545 546 sc->sc_tx_free++; 547 } 548 sc->sc_tx_considx = idx; 549 550 if (sc->sc_tx_free > 0) 551 ifp->if_flags &= ~IFF_OACTIVE; 552 553 /* 554 * No more pending TX descriptor, 555 * cancel the watchdog timer. 556 */ 557 if (sc->sc_tx_free == ENET_TX_RING_CNT) 558 ifp->if_timer = 0; 559 560 return 1; 561 } 562 563 static int 564 enet_rx_intr(void *arg) 565 { 566 struct enet_softc *sc; 567 struct ifnet *ifp; 568 struct enet_rxsoft *rxs; 569 int idx, len, amount; 570 uint32_t flags1, flags2; 571 struct mbuf *m, *m0, *mprev; 572 573 sc = arg; 574 ifp = &sc->sc_ethercom.ec_if; 575 576 m0 = mprev = NULL; 577 amount = 0; 578 for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) { 579 580 rxs = &sc->sc_rxsoft[idx]; 581 582 RXDESC_READIN(idx); 583 if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) { 584 /* This RX Descriptor has not been received yet */ 585 break; 586 } 587 588 /* 589 * build mbuf from RX Descriptor if needed 590 */ 591 m = rxs->rxs_mbuf; 592 rxs->rxs_mbuf = NULL; 593 594 flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len; 595 len = RXFLAGS1_LEN(flags1); 596 597 #define RACC_SHIFT16 2 598 if (m0 == NULL) { 599 m0 = m; 600 m_adj(m0, RACC_SHIFT16); 601 len -= RACC_SHIFT16; 602 m->m_len = len; 603 amount = len; 604 } else { 605 if (flags1 & RXFLAGS1_L) 606 len = len - amount - RACC_SHIFT16; 607 608 m->m_len = len; 609 amount += len; 610 m->m_flags &= ~M_PKTHDR; 611 mprev->m_next = m; 612 } 613 mprev = m; 614 615 flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 616 617 if (flags1 & RXFLAGS1_L) { 618 /* last buffer */ 619 if ((amount < ETHER_HDR_LEN) || 620 ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO | 621 RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) || 622 (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE | 623 RXFLAGS2_CE)))) { 624 625 #ifdef DEBUG_ENET 626 if (enet_debug) { 627 char flags1buf[128], flags2buf[128]; 628 snprintb(flags1buf, sizeof(flags1buf), 629 "\20" "\31MISS" "\26LENGTHVIOLATION" 630 "\25NONOCTET" "\23CRC" "\22OVERRUN" 631 "\21TRUNCATED", flags1); 632 snprintb(flags2buf, sizeof(flags2buf), 633 "\20" "\40MAC" "\33PHY" 634 "\32COLLISION", flags2); 635 636 DEVICE_DPRINTF( 637 "rxdesc[%d]: receive error: " 638 "flags1=%s,flags2=%s,len=%d\n", 639 idx, flags1buf, flags2buf, amount); 640 } 641 #endif /* DEBUG_ENET */ 642 ifp->if_ierrors++; 643 m_freem(m0); 644 645 } else { 646 /* packet receive ok */ 647 m_set_rcvif(m0, ifp); 648 m0->m_pkthdr.len = amount; 649 650 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 651 rxs->rxs_dmamap->dm_mapsize, 652 BUS_DMASYNC_PREREAD); 653 654 if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 | 655 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 656 M_CSUM_TCPv6 | M_CSUM_UDPv6)) 657 enet_rx_csum(sc, ifp, m0, idx); 658 659 if_percpuq_enqueue(ifp->if_percpuq, m0); 660 } 661 662 m0 = NULL; 663 mprev = NULL; 664 amount = 0; 665 666 } else { 667 /* continued from previous buffer */ 668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 670 } 671 672 
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
		if (enet_alloc_rxbuf(sc, idx) != 0) {
			panic("enet_alloc_rxbuf NULL\n");
		}
	}
	sc->sc_rx_readidx = idx;

	/* re-enable RX DMA to make sure */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	return 1;
}

static void
enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx)
{
	uint32_t flags2;
	uint8_t proto;

	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

	if (flags2 & RXFLAGS2_IPV6) {
		proto = sc->sc_rxdesc_ring[idx].rx_proto;

		/* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */
		if ((proto == IPPROTO_TCP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		else if ((proto == IPPROTO_UDP) &&
		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
		else
			return;

		/* IPv6 protocol checksum error */
		if (flags2 & RXFLAGS2_PCR)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;

	} else {
		struct ether_header *eh;
		uint8_t *ip;

		eh = mtod(m, struct ether_header *);

		/* XXX: is an IPv4? */
		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
			return;
		ip = (uint8_t *)(eh + 1);
		if ((ip[0] & 0xf0) != 0x40)
			return;

		proto = sc->sc_rxdesc_ring[idx].rx_proto;
		if (flags2 & RXFLAGS2_ICE) {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |=
				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
			}
		} else {
			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			}

			/*
			 * PCR is valid when
			 * ICE == 0 and FRAG == 0
			 */
			if (flags2 & RXFLAGS2_FRAG)
				return;

			/*
			 * PCR is valid when proto is TCP or UDP
			 */
			if ((proto == IPPROTO_TCP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			else if ((proto == IPPROTO_UDP) &&
			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			else
				return;

			/* IPv4 protocol cksum error */
			if (flags2 & RXFLAGS2_PCR)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	}
}

static void
enet_setmulti(struct enet_softc *sc)
{
	struct ifnet *ifp;
	struct ether_multi *enm;
	struct ether_multistep step;
	int promisc;
	uint32_t crc;
	uint32_t gaddr[2];

	ifp = &sc->sc_ethercom.ec_if;

	promisc = 0;
	if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		gaddr[0] = gaddr[1] = 0xffffffff;
	} else {
		gaddr[0] = gaddr[1] = 0;

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);

	if (promisc) {
		/* match all packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
	} else {
		/* don't match any packet */
		ENET_REG_WRITE(sc, ENET_IAUR, 0);
		ENET_REG_WRITE(sc, ENET_IALR, 0);
	}
}

static void
enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	paddr = ENET_REG_READ(sc, ENET_PALR);
	hwaddr[0] = paddr >> 24;
	hwaddr[1] = paddr >> 16;
	hwaddr[2] = paddr >>
8; 812 hwaddr[3] = paddr; 813 814 paddr = ENET_REG_READ(sc, ENET_PAUR); 815 hwaddr[4] = paddr >> 24; 816 hwaddr[5] = paddr >> 16; 817 } 818 819 static void 820 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 821 { 822 uint32_t paddr; 823 824 paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) | 825 hwaddr[3]; 826 ENET_REG_WRITE(sc, ENET_PALR, paddr); 827 paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16); 828 ENET_REG_WRITE(sc, ENET_PAUR, paddr); 829 } 830 831 /* 832 * ifnet interfaces 833 */ 834 static int 835 enet_init(struct ifnet *ifp) 836 { 837 struct enet_softc *sc; 838 int s, error; 839 840 sc = ifp->if_softc; 841 842 s = splnet(); 843 844 enet_init_regs(sc, 0); 845 enet_init_txring(sc); 846 error = enet_init_rxring(sc); 847 if (error != 0) { 848 enet_drain_rxbuf(sc); 849 device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n"); 850 goto init_failure; 851 } 852 853 /* reload mac address */ 854 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 855 enet_sethwaddr(sc, sc->sc_enaddr); 856 857 /* program multicast address */ 858 enet_setmulti(sc); 859 860 /* update if_flags */ 861 ifp->if_flags |= IFF_RUNNING; 862 ifp->if_flags &= ~IFF_OACTIVE; 863 864 /* update local copy of if_flags */ 865 sc->sc_if_flags = ifp->if_flags; 866 867 /* mii */ 868 mii_mediachg(&sc->sc_mii); 869 870 /* enable RX DMA */ 871 ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 872 873 sc->sc_stopping = false; 874 callout_schedule(&sc->sc_tick_ch, ENET_TICK); 875 876 init_failure: 877 splx(s); 878 879 return error; 880 } 881 882 static void 883 enet_start(struct ifnet *ifp) 884 { 885 struct enet_softc *sc; 886 struct mbuf *m; 887 int npkt; 888 889 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 890 return; 891 892 sc = ifp->if_softc; 893 for (npkt = 0; ; npkt++) { 894 IFQ_POLL(&ifp->if_snd, m); 895 if (m == NULL) 896 break; 897 898 if (sc->sc_tx_free <= 0) { 899 /* no tx descriptor now... */ 900 ifp->if_flags |= IFF_OACTIVE; 901 DEVICE_DPRINTF("TX descriptor is full\n"); 902 break; 903 } 904 905 IFQ_DEQUEUE(&ifp->if_snd, m); 906 907 if (enet_encap_txring(sc, &m) != 0) { 908 /* too many mbuf chains? */ 909 ifp->if_flags |= IFF_OACTIVE; 910 DEVICE_DPRINTF( 911 "TX descriptor is full. dropping packet\n"); 912 m_freem(m); 913 ifp->if_oerrors++; 914 break; 915 } 916 917 /* Pass the packet to any BPF listeners */ 918 bpf_mtap(ifp, m); 919 } 920 921 if (npkt) { 922 /* enable TX DMA */ 923 ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE); 924 925 ifp->if_timer = 5; 926 } 927 } 928 929 static void 930 enet_stop(struct ifnet *ifp, int disable) 931 { 932 struct enet_softc *sc; 933 int s; 934 uint32_t v; 935 936 sc = ifp->if_softc; 937 938 s = splnet(); 939 940 sc->sc_stopping = true; 941 callout_stop(&sc->sc_tick_ch); 942 943 /* clear ENET_ECR[ETHEREN] to abort receive and transmit */ 944 v = ENET_REG_READ(sc, ENET_ECR); 945 ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN); 946 947 /* Mark the interface as down and cancel the watchdog timer. 
*/ 948 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 949 ifp->if_timer = 0; 950 951 if (disable) { 952 enet_drain_txbuf(sc); 953 enet_drain_rxbuf(sc); 954 } 955 956 splx(s); 957 } 958 959 static void 960 enet_watchdog(struct ifnet *ifp) 961 { 962 struct enet_softc *sc; 963 int s; 964 965 sc = ifp->if_softc; 966 s = splnet(); 967 968 device_printf(sc->sc_dev, "watchdog timeout\n"); 969 ifp->if_oerrors++; 970 971 /* salvage packets left in descriptors */ 972 enet_tx_intr(sc); 973 enet_rx_intr(sc); 974 975 /* reset */ 976 enet_stop(ifp, 1); 977 enet_init(ifp); 978 979 splx(s); 980 } 981 982 static void 983 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 984 { 985 struct enet_softc *sc = ifp->if_softc; 986 987 ether_mediastatus(ifp, ifmr); 988 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 989 | sc->sc_flowflags; 990 } 991 992 static int 993 enet_ifflags_cb(struct ethercom *ec) 994 { 995 struct ifnet *ifp = &ec->ec_if; 996 struct enet_softc *sc = ifp->if_softc; 997 int change = ifp->if_flags ^ sc->sc_if_flags; 998 999 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1000 return ENETRESET; 1001 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 1002 return 0; 1003 1004 enet_setmulti(sc); 1005 1006 sc->sc_if_flags = ifp->if_flags; 1007 return 0; 1008 } 1009 1010 static int 1011 enet_ioctl(struct ifnet *ifp, u_long command, void *data) 1012 { 1013 struct enet_softc *sc; 1014 struct ifreq *ifr; 1015 int s, error; 1016 uint32_t v; 1017 1018 sc = ifp->if_softc; 1019 ifr = data; 1020 1021 error = 0; 1022 1023 s = splnet(); 1024 1025 switch (command) { 1026 case SIOCSIFMTU: 1027 if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 1028 error = EINVAL; 1029 } else { 1030 ifp->if_mtu = ifr->ifr_mtu; 1031 1032 /* set maximum frame length */ 1033 v = MTU2FRAMESIZE(ifr->ifr_mtu); 1034 ENET_REG_WRITE(sc, ENET_FTRL, v); 1035 v = ENET_REG_READ(sc, ENET_RCR); 1036 v &= ~ENET_RCR_MAX_FL(0x3fff); 1037 v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 1038 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1039 ENET_REG_WRITE(sc, ENET_RCR, v); 1040 } 1041 break; 1042 case SIOCSIFMEDIA: 1043 case SIOCGIFMEDIA: 1044 /* Flow control requires full-duplex mode. */ 1045 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1046 (ifr->ifr_media & IFM_FDX) == 0) 1047 ifr->ifr_media &= ~IFM_ETH_FMASK; 1048 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1049 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1050 /* We can do both TXPAUSE and RXPAUSE. 
				 */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error != ENETRESET)
			break;

		/* post-process */
		error = 0;
		switch (command) {
		case SIOCSIFCAP:
			error = (*ifp->if_init)(ifp);
			break;
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (ifp->if_flags & IFF_RUNNING)
				enet_setmulti(sc);
			break;
		}
		break;
	}

	splx(s);

	return error;
}

/*
 * for MII
 */
static int
enet_miibus_readreg(device_t dev, int phy, int reg)
{
	struct enet_softc *sc;
	int timeout;
	uint32_t val, status;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* read command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		status = ENET_REG_READ(sc, ENET_EIR);
		if (status & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n",
		    reg);
		val = -1;
	} else {
		val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK;
	}

	return val;
}

static void
enet_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct enet_softc *sc;
	int timeout;

	sc = device_private(dev);

	/* clear MII update */
	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);

	/* write command */
	ENET_REG_WRITE(sc, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA |
	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) |
	    (ENET_MMFR_DATAMASK & val));

	/* check MII update */
	for (timeout = 5000; timeout > 0; --timeout) {
		if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII)
			break;
	}
	if (timeout <= 0) {
		DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n",
		    reg);
	}
}

static void
enet_miibus_statchg(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	uint32_t ecr, ecr0;
	uint32_t rcr, rcr0;
	uint32_t tcr, tcr0;

	sc = ifp->if_softc;
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/* get current status */
	ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET;
	rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR);
	tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR);

	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
		tcr |= ENET_TCR_FDEN;	/* full duplex */
		rcr &= ~ENET_RCR_DRT;	/* enable receive on transmit */
	} else {
		tcr &= ~ENET_TCR_FDEN;	/* half duplex */
		rcr |= ENET_RCR_DRT;	/* disable receive on transmit */
	}

	if ((tcr ^ tcr0) & ENET_TCR_FDEN) {
		/*
		 * need to reset because
		 * FDEN can change when ECR[ETHEREN] is 0
		 */
		enet_init_regs(sc, 0);
		return;
	}

	switch
(IFM_SUBTYPE(ife->ifm_media)) { 1191 case IFM_AUTO: 1192 case IFM_1000_T: 1193 ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1194 rcr &= ~ENET_RCR_RMII_10T; 1195 break; 1196 case IFM_100_TX: 1197 ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1198 rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1199 break; 1200 case IFM_10_T: 1201 ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1202 rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1203 break; 1204 default: 1205 ecr = ecr0; 1206 rcr = rcr0; 1207 tcr = tcr0; 1208 break; 1209 } 1210 1211 if (sc->sc_rgmii == 0) 1212 ecr &= ~ENET_ECR_SPEED; 1213 1214 if (sc->sc_flowflags & IFM_FLOW) 1215 rcr |= ENET_RCR_FCE; 1216 else 1217 rcr &= ~ENET_RCR_FCE; 1218 1219 /* update registers if need change */ 1220 if (ecr != ecr0) 1221 ENET_REG_WRITE(sc, ENET_ECR, ecr); 1222 if (rcr != rcr0) 1223 ENET_REG_WRITE(sc, ENET_RCR, rcr); 1224 if (tcr != tcr0) 1225 ENET_REG_WRITE(sc, ENET_TCR, tcr); 1226 } 1227 1228 /* 1229 * handling descriptors 1230 */ 1231 static void 1232 enet_init_txring(struct enet_softc *sc) 1233 { 1234 int i; 1235 1236 /* build TX ring */ 1237 for (i = 0; i < ENET_TX_RING_CNT; i++) { 1238 sc->sc_txdesc_ring[i].tx_flags1_len = 1239 ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0); 1240 sc->sc_txdesc_ring[i].tx_databuf = 0; 1241 sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT; 1242 sc->sc_txdesc_ring[i].tx__reserved1 = 0; 1243 sc->sc_txdesc_ring[i].tx_flags3 = 0; 1244 sc->sc_txdesc_ring[i].tx_1588timestamp = 0; 1245 sc->sc_txdesc_ring[i].tx__reserved2 = 0; 1246 sc->sc_txdesc_ring[i].tx__reserved3 = 0; 1247 1248 TXDESC_WRITEOUT(i); 1249 } 1250 1251 sc->sc_tx_free = ENET_TX_RING_CNT; 1252 sc->sc_tx_considx = 0; 1253 sc->sc_tx_prodidx = 0; 1254 } 1255 1256 static int 1257 enet_init_rxring(struct enet_softc *sc) 1258 { 1259 int i, error; 1260 1261 /* build RX ring */ 1262 for (i = 0; i < ENET_RX_RING_CNT; i++) { 1263 error = enet_alloc_rxbuf(sc, i); 1264 if (error != 0) 1265 return error; 1266 } 1267 1268 sc->sc_rx_readidx = 0; 1269 1270 return 0; 1271 } 1272 1273 static int 1274 enet_alloc_rxbuf(struct enet_softc *sc, int idx) 1275 { 1276 struct mbuf *m; 1277 int error; 1278 1279 KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT)); 1280 1281 /* free mbuf if already allocated */ 1282 if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) { 1283 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap); 1284 m_freem(sc->sc_rxsoft[idx].rxs_mbuf); 1285 sc->sc_rxsoft[idx].rxs_mbuf = NULL; 1286 } 1287 1288 /* allocate new mbuf cluster */ 1289 MGETHDR(m, M_DONTWAIT, MT_DATA); 1290 if (m == NULL) 1291 return ENOBUFS; 1292 MCLGET(m, M_DONTWAIT); 1293 if (!(m->m_flags & M_EXT)) { 1294 m_freem(m); 1295 return ENOBUFS; 1296 } 1297 m->m_len = MCLBYTES; 1298 m->m_next = NULL; 1299 1300 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 1301 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1302 BUS_DMA_READ | BUS_DMA_NOWAIT); 1303 if (error) { 1304 m_freem(m); 1305 return error; 1306 } 1307 1308 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0, 1309 sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize, 1310 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1311 1312 sc->sc_rxsoft[idx].rxs_mbuf = m; 1313 enet_reset_rxdesc(sc, idx); 1314 return 0; 1315 } 1316 1317 static void 1318 enet_reset_rxdesc(struct enet_softc *sc, int idx) 1319 { 1320 uint32_t paddr; 1321 1322 paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr; 1323 1324 sc->sc_rxdesc_ring[idx].rx_flags1_len = 1325 RXFLAGS1_E | 1326 ((idx == (ENET_RX_RING_CNT - 1)) ? 
	    RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			ifp->if_oerrors++;
		}
		sc->sc_tx_free++;
	}
}

static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * TX descriptor must be able to contain mbuf chains,
	 * so, make up ENET_MAX_PKT_NSEGS dmamap.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * each RX descriptor holds one mbuf cluster,
	 * so make up a single-segment dmamap.
	 */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_2;
		}
	}

	if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT,
	    (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_txdesc_ring, 0,
	    sizeof(struct enet_txdesc) * ENET_TX_RING_CNT);

	if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT,
	    (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_rxdesc_ring, 0,
	    sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT);

	return 0;

fail_2:
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_1:
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	return error;
}

static int
enet_encap_mbufalign(struct mbuf **mp)
{
	struct mbuf *m, *m0, *mt, *p, *x;
	void *ap;
	uint32_t alignoff, chiplen;

	/*
	 * the i.MX6 SoC ethernet controller requires that the buffer
	 * address be aligned to 8 bytes, and that the buffer length be
	 * greater than 10 (first fragment only?)
	 */
#define ALIGNBYTE	8
#define MINBUFSIZE	10
#define ALIGN_PTR(p, align)	\
	(void *)(((uintptr_t)(p) + ((align) - 1)) & -(align))

	m0 = *mp;
	mt = p = NULL;
	for (m = m0; m != NULL; m = m->m_next) {
		alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1);
		if (m->m_len < (ALIGNBYTE * 2)) {
			/*
			 * rearrange mbuf data aligned
			 *
			 *       align 8 *       *       *       *       *
			 *               +0123456789abcdef0123456789abcdef0
			 *  FROM m->m_data[___________abcdefghijklmn_______]
			 *
			 *               +0123456789abcdef0123456789abcdef0
			 *  TO   m->m_data[________abcdefghijklm___________] or
			 *       m->m_data[________________abcdefghijklmn__]
			 */
			if ((alignoff != 0) && (m->m_len != 0)) {
				chiplen = ALIGNBYTE - alignoff;
				if (M_LEADINGSPACE(m) >= alignoff) {
					ap = m->m_data - alignoff;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else if (M_TRAILINGSPACE(m) >= chiplen) {
					ap = m->m_data + chiplen;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else {
					/*
					 * no space to align data. (M_READONLY?)
					 * allocate new mbuf aligned,
					 * and copy to it.
1499 */ 1500 MGET(x, M_DONTWAIT, m->m_type); 1501 if (x == NULL) { 1502 m_freem(m); 1503 return ENOBUFS; 1504 } 1505 MCLAIM(x, m->m_owner); 1506 if (m->m_flags & M_PKTHDR) 1507 M_MOVE_PKTHDR(x, m); 1508 x->m_len = m->m_len; 1509 x->m_data = ALIGN_PTR(x->m_data, 1510 ALIGNBYTE); 1511 memcpy(mtod(x, void *), mtod(m, void *), 1512 m->m_len); 1513 p->m_next = x; 1514 x->m_next = m_free(m); 1515 m = x; 1516 } 1517 } 1518 1519 /* 1520 * fill 1st mbuf at least 10byte 1521 * 1522 * align 8 * * * * * 1523 * +0123456789abcdef0123456789abcdef0 1524 * FROM m->m_data[________abcde___________________] 1525 * m->m_data[__fg____________________________] 1526 * m->m_data[_________________hi_____________] 1527 * m->m_data[__________jk____________________] 1528 * m->m_data[____l___________________________] 1529 * 1530 * +0123456789abcdef0123456789abcdef0 1531 * TO m->m_data[________abcdefghij______________] 1532 * m->m_data[________________________________] 1533 * m->m_data[________________________________] 1534 * m->m_data[___________k____________________] 1535 * m->m_data[____l___________________________] 1536 */ 1537 if (mt == NULL) { 1538 mt = m; 1539 while (mt->m_len == 0) { 1540 mt = mt->m_next; 1541 if (mt == NULL) { 1542 m_freem(m); 1543 return ENOBUFS; 1544 } 1545 } 1546 1547 /* mt = 1st mbuf, x = 2nd mbuf */ 1548 x = mt->m_next; 1549 while (mt->m_len < MINBUFSIZE) { 1550 if (x == NULL) { 1551 m_freem(m); 1552 return ENOBUFS; 1553 } 1554 1555 alignoff = (uintptr_t)x->m_data & 1556 (ALIGNBYTE - 1); 1557 chiplen = ALIGNBYTE - alignoff; 1558 if (chiplen > x->m_len) { 1559 chiplen = x->m_len; 1560 } else if ((mt->m_len + chiplen) < 1561 MINBUFSIZE) { 1562 /* 1563 * next mbuf should be greater 1564 * than ALIGNBYTE? 1565 */ 1566 if (x->m_len >= (chiplen + 1567 ALIGNBYTE * 2)) 1568 chiplen += ALIGNBYTE; 1569 else 1570 chiplen = x->m_len; 1571 } 1572 1573 if (chiplen && 1574 (M_TRAILINGSPACE(mt) < chiplen)) { 1575 /* 1576 * move data to the begining of 1577 * m_dat[] (aligned) to en- 1578 * large trailingspace 1579 */ 1580 if (mt->m_flags & M_EXT) { 1581 ap = mt->m_ext.ext_buf; 1582 } else if (mt->m_flags & 1583 M_PKTHDR) { 1584 ap = mt->m_pktdat; 1585 } else { 1586 ap = mt->m_dat; 1587 } 1588 ap = ALIGN_PTR(ap, ALIGNBYTE); 1589 memcpy(ap, mt->m_data, 1590 mt->m_len); 1591 mt->m_data = ap; 1592 } 1593 1594 if (chiplen && 1595 (M_TRAILINGSPACE(mt) >= chiplen)) { 1596 memcpy(mt->m_data + mt->m_len, 1597 x->m_data, chiplen); 1598 mt->m_len += chiplen; 1599 m_adj(x, chiplen); 1600 } 1601 1602 x = x->m_next; 1603 } 1604 } 1605 1606 } else { 1607 mt = m; 1608 1609 /* 1610 * allocate new mbuf x, and rearrange as below; 1611 * 1612 * align 8 * * * * * 1613 * +0123456789abcdef0123456789abcdef0 1614 * FROM m->m_data[____________abcdefghijklmnopq___] 1615 * 1616 * +0123456789abcdef0123456789abcdef0 1617 * TO x->m_data[________abcdefghijkl____________] 1618 * m->m_data[________________________mnopq___] 1619 * 1620 */ 1621 if (alignoff != 0) { 1622 /* at least ALIGNBYTE */ 1623 chiplen = ALIGNBYTE - alignoff + ALIGNBYTE; 1624 1625 MGET(x, M_DONTWAIT, m->m_type); 1626 if (x == NULL) { 1627 m_freem(m); 1628 return ENOBUFS; 1629 } 1630 MCLAIM(x, m->m_owner); 1631 if (m->m_flags & M_PKTHDR) 1632 M_MOVE_PKTHDR(x, m); 1633 x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE); 1634 memcpy(mtod(x, void *), mtod(m, void *), 1635 chiplen); 1636 x->m_len = chiplen; 1637 x->m_next = m; 1638 m_adj(m, chiplen); 1639 1640 if (p == NULL) 1641 m0 = x; 1642 else 1643 p->m_next = x; 1644 } 1645 } 1646 p = m; 1647 } 1648 *mp = m0; 1649 1650 return 
0; 1651 } 1652 1653 static int 1654 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp) 1655 { 1656 bus_dmamap_t map; 1657 struct mbuf *m; 1658 int csumflags, idx, i, error; 1659 uint32_t flags1, flags2; 1660 1661 idx = sc->sc_tx_prodidx; 1662 map = sc->sc_txsoft[idx].txs_dmamap; 1663 1664 /* align mbuf data for claim of ENET */ 1665 error = enet_encap_mbufalign(mp); 1666 if (error != 0) 1667 return error; 1668 1669 m = *mp; 1670 csumflags = m->m_pkthdr.csum_flags; 1671 1672 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1673 BUS_DMA_NOWAIT); 1674 if (error != 0) { 1675 device_printf(sc->sc_dev, 1676 "Error mapping mbuf into TX chain: error=%d\n", error); 1677 m_freem(m); 1678 return error; 1679 } 1680 1681 if (map->dm_nsegs > sc->sc_tx_free) { 1682 bus_dmamap_unload(sc->sc_dmat, map); 1683 device_printf(sc->sc_dev, 1684 "too many mbuf chain %d\n", map->dm_nsegs); 1685 m_freem(m); 1686 return ENOBUFS; 1687 } 1688 1689 /* fill protocol cksum zero beforehand */ 1690 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1691 M_CSUM_UDPv6 | M_CSUM_TCPv6)) { 1692 struct mbuf *m1; 1693 int ehlen, moff; 1694 uint16_t etype; 1695 1696 m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype); 1697 switch (ntohs(etype)) { 1698 case ETHERTYPE_IP: 1699 case ETHERTYPE_IPV6: 1700 ehlen = ETHER_HDR_LEN; 1701 break; 1702 case ETHERTYPE_VLAN: 1703 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1704 break; 1705 default: 1706 ehlen = 0; 1707 break; 1708 } 1709 1710 if (ehlen) { 1711 m1 = m_getptr(m, ehlen + 1712 M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) + 1713 M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data), 1714 &moff); 1715 if (m1 != NULL) 1716 *(uint16_t *)(mtod(m1, char *) + moff) = 0; 1717 } 1718 } 1719 1720 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1721 BUS_DMASYNC_PREWRITE); 1722 1723 for (i = 0; i < map->dm_nsegs; i++) { 1724 flags1 = TXFLAGS1_R; 1725 flags2 = 0; 1726 1727 if (i == 0) { 1728 flags1 |= TXFLAGS1_T1; /* mark as first segment */ 1729 sc->sc_txsoft[idx].txs_mbuf = m; 1730 } 1731 1732 /* checksum offloading */ 1733 if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | 1734 M_CSUM_UDPv6 | M_CSUM_TCPv6)) 1735 flags2 |= TXFLAGS2_PINS; 1736 if (csumflags & (M_CSUM_IPv4)) 1737 flags2 |= TXFLAGS2_IINS; 1738 1739 if (i == map->dm_nsegs - 1) { 1740 /* mark last segment */ 1741 flags1 |= TXFLAGS1_L | TXFLAGS1_TC; 1742 flags2 |= TXFLAGS2_INT; 1743 } 1744 if (idx == ENET_TX_RING_CNT - 1) { 1745 /* mark end of ring */ 1746 flags1 |= TXFLAGS1_W; 1747 } 1748 1749 sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr; 1750 sc->sc_txdesc_ring[idx].tx_flags2 = flags2; 1751 sc->sc_txdesc_ring[idx].tx_flags3 = 0; 1752 TXDESC_WRITEOUT(idx); 1753 1754 sc->sc_txdesc_ring[idx].tx_flags1_len = 1755 flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len); 1756 TXDESC_WRITEOUT(idx); 1757 1758 idx = ENET_TX_NEXTIDX(idx); 1759 sc->sc_tx_free--; 1760 } 1761 1762 sc->sc_tx_prodidx = idx; 1763 1764 return 0; 1765 } 1766 1767 /* 1768 * device initialize 1769 */ 1770 static int 1771 enet_init_regs(struct enet_softc *sc, int init) 1772 { 1773 struct mii_data *mii; 1774 struct ifmedia_entry *ife; 1775 paddr_t paddr; 1776 uint32_t val; 1777 int miimode, fulldup, ecr_speed, rcr_speed, flowctrl; 1778 1779 if (init) { 1780 fulldup = 1; 1781 ecr_speed = ENET_ECR_SPEED; 1782 rcr_speed = 0; 1783 flowctrl = 0; 1784 } else { 1785 mii = &sc->sc_mii; 1786 ife = mii->mii_media.ifm_cur; 1787 1788 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) 1789 fulldup = 1; 1790 else 1791 fulldup = 0; 1792 1793 switch 
(IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			ecr_speed = 0;
			rcr_speed = ENET_RCR_RMII_10T;
			break;
		case IFM_100_TX:
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	if (sc->sc_rgmii == 0)
		ecr_speed = 0;

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupt */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable MIB register */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/* MII speed setup. MDCclk(=2.5MHz) = ENET_PLL/((val+1)*2) */
	val = ((sc->sc_pllclock) / 500000 - 1) / 10;
	ENET_REG_WRITE(sc, ENET_MSCR, val << 1);

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksum is default off (override in TX descriptor) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align ethernet payload on 32bit, discard frames with MAC layer error,
	 * and don't discard checksum error
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */

	if (sc->sc_rgmii == 0)
		miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE;
	else
		miimode = ENET_RCR_RGMII_EN;
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* RX frame padding remove */
	    miimode |
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
	    rcr_speed |
	    (fulldup ? 0 : ENET_RCR_DRT) |
	    ENET_RCR_MAX_FL(val));

	/* Maximum Receive BufSize per one descriptor */
	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);


	/* TX/RX Descriptor Physical Address */
	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
	/* sync cache */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* enable interrupts */
	val = ENET_EIR_TXF|ENET_EIR_RXF|ENET_EIR_EBERR;
	if (sc->sc_imxtype == 7)
		val |= ENET_EIR_TXF2|ENET_EIR_RXF2|ENET_EIR_TXF1|ENET_EIR_RXF1;
	ENET_REG_WRITE(sc, ENET_EIMR, val);

	/* enable ether */
	ENET_REG_WRITE(sc, ENET_ECR,
#if _BYTE_ORDER == _LITTLE_ENDIAN
	    ENET_ECR_DBSWP |
#endif
	    ecr_speed |
	    ENET_ECR_EN1588 |		/* use enhanced TX/RX descriptor */
	    ENET_ECR_ETHEREN);		/* Ethernet Enable */

	return 0;
}

static int
enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp,
    bus_dmamap_t *mapp)
{
	bus_dma_segment_t seglist[1];
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist,
	    1, &nsegs, M_NOWAIT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate DMA buffer, error=%d\n", error);
		goto fail_alloc;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to map DMA buffer, error=%d\n",
		    error);
		goto fail_map;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create DMA map, error=%d\n", error);
		goto fail_create;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load DMA map, error=%d\n", error);
		goto fail_load;
	}

	return 0;

fail_load:
	bus_dmamap_destroy(sc->sc_dmat, *mapp);
fail_create:
	bus_dmamem_unmap(sc->sc_dmat, *addrp, size);
fail_map:
	bus_dmamem_free(sc->sc_dmat, seglist, 1);
fail_alloc:
	return error;
}