xref: /netbsd-src/sys/arch/arm/imx/if_enet.c (revision aad9773e38ed2370a628a6416e098f9008fc10a7)
1 /*	$NetBSD: if_enet.c,v 1.1 2014/09/25 05:05:28 ryo Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 Ryo Shimizu <ryo@nerv.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * i.MX6 10/100/1000-Mbps ethernet MAC (ENET)
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.1 2014/09/25 05:05:28 ryo Exp $");
35 
36 #include "imxocotp.h"
37 #include "imxccm.h"
38 #include "vlan.h"
39 
40 #include <sys/param.h>
41 #include <sys/bus.h>
42 #include <sys/mbuf.h>
43 #include <sys/device.h>
44 #include <sys/sockio.h>
45 #include <sys/kernel.h>
46 #include <sys/rnd.h>
47 
48 #include <lib/libkern/libkern.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_ether.h>
54 #include <net/bpf.h>
55 #include <net/if_vlanvar.h>
56 
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/ip.h>
60 
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63 
64 #include <arm/imx/imx6var.h>
65 #include <arm/imx/imx6_reg.h>
66 #include <arm/imx/imx6_ocotpreg.h>
67 #include <arm/imx/imx6_ocotpvar.h>
68 #include <arm/imx/imx6_ccmreg.h>
69 #include <arm/imx/imx6_ccmvar.h>
70 #include <arm/imx/if_enetreg.h>
71 #include "locators.h"
72 
73 #undef DEBUG_ENET
74 #undef ENET_EVENT_COUNTER
75 
76 #ifdef DEBUG_ENET
77 int enet_debug = 0;
78 # define DEVICE_DPRINTF(args...)	\
79 	do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0)
80 #else
81 # define DEVICE_DPRINTF(args...)
82 #endif
83 
84 
85 #define RXDESC_MAXBUFSIZE	0x07f0
86 				/* the i.MX6 ENET does not work with sizes greater than 0x0800... */
87 
88 #undef ENET_SUPPORT_JUMBO	/* jumbo frame support is unstable */
89 #ifdef ENET_SUPPORT_JUMBO
90 # define ENET_MAX_PKT_LEN	4034	/* MAX FIFO LEN */
91 #else
92 # define ENET_MAX_PKT_LEN	1522
93 #endif
94 #define ENET_DEFAULT_PKT_LEN	1522	/* including VLAN tag */
95 #define MTU2FRAMESIZE(n)	\
96 	((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN)
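
/*
 * For example, MTU2FRAMESIZE(ETHERMTU) = 1500 + 14 + 4 + 4 = 1522,
 * which matches ENET_DEFAULT_PKT_LEN above.
 */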
97 
98 
99 #define ENET_MAX_PKT_NSEGS	64
100 #define ENET_TX_RING_CNT	256	/* must be 2^n */
101 #define ENET_RX_RING_CNT	256	/* must be 2^n */
102 
103 #define ENET_TX_NEXTIDX(idx)	(((idx) + 1) & (ENET_TX_RING_CNT - 1))
104 #define ENET_RX_NEXTIDX(idx)	(((idx) + 1) & (ENET_RX_RING_CNT - 1))
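
/*
 * The masking in ENET_TX_NEXTIDX/ENET_RX_NEXTIDX only wraps correctly for
 * power-of-two ring sizes; a compile-time check keeps the "must be 2^n"
 * comments honest (a sketch, assuming __CTASSERT from <sys/cdefs.h> is
 * available in this tree):
 */
__CTASSERT((ENET_TX_RING_CNT & (ENET_TX_RING_CNT - 1)) == 0);
__CTASSERT((ENET_RX_RING_CNT & (ENET_RX_RING_CNT - 1)) == 0);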
105 
106 struct enet_txsoft {
107 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
108 	bus_dmamap_t txs_dmamap;	/* our DMA map */
109 };
110 
111 struct enet_rxsoft {
112 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
113 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
114 };
115 
116 struct enet_softc {
117 	device_t sc_dev;
118 
119 	bus_addr_t sc_addr;
120 	bus_space_tag_t sc_iot;
121 	bus_space_handle_t sc_ioh;
122 	bus_dma_tag_t sc_dmat;
123 
124 	/* interrupts */
125 	void *sc_ih;
126 	callout_t sc_tick_ch;
127 	bool sc_stopping;
128 
129 	/* TX */
130 	struct enet_txdesc *sc_txdesc_ring;	/* [ENET_TX_RING_CNT] */
131 	bus_dmamap_t sc_txdesc_dmamap;
132 	struct enet_rxdesc *sc_rxdesc_ring;	/* [ENET_RX_RING_CNT] */
133 	bus_dmamap_t sc_rxdesc_dmamap;
134 	struct enet_txsoft sc_txsoft[ENET_TX_RING_CNT];
135 	int sc_tx_considx;
136 	int sc_tx_prodidx;
137 	int sc_tx_free;
138 
139 	/* RX */
140 	struct enet_rxsoft sc_rxsoft[ENET_RX_RING_CNT];
141 	int sc_rx_readidx;
142 
143 	/* misc */
144 	int sc_if_flags;			/* local copy of if_flags */
145 	int sc_flowflags;			/* 802.3x flow control flags */
146 	struct ethercom sc_ethercom;		/* interface info */
147 	struct mii_data sc_mii;
148 	uint8_t sc_enaddr[ETHER_ADDR_LEN];
149 	krndsource_t sc_rnd_source;
150 
151 #ifdef ENET_EVENT_COUNTER
152 	struct evcnt sc_ev_t_drop;
153 	struct evcnt sc_ev_t_packets;
154 	struct evcnt sc_ev_t_bc_pkt;
155 	struct evcnt sc_ev_t_mc_pkt;
156 	struct evcnt sc_ev_t_crc_align;
157 	struct evcnt sc_ev_t_undersize;
158 	struct evcnt sc_ev_t_oversize;
159 	struct evcnt sc_ev_t_frag;
160 	struct evcnt sc_ev_t_jab;
161 	struct evcnt sc_ev_t_col;
162 	struct evcnt sc_ev_t_p64;
163 	struct evcnt sc_ev_t_p65to127n;
164 	struct evcnt sc_ev_t_p128to255n;
165 	struct evcnt sc_ev_t_p256to511;
166 	struct evcnt sc_ev_t_p512to1023;
167 	struct evcnt sc_ev_t_p1024to2047;
168 	struct evcnt sc_ev_t_p_gte2048;
169 	struct evcnt sc_ev_t_octets;
170 	struct evcnt sc_ev_r_packets;
171 	struct evcnt sc_ev_r_bc_pkt;
172 	struct evcnt sc_ev_r_mc_pkt;
173 	struct evcnt sc_ev_r_crc_align;
174 	struct evcnt sc_ev_r_undersize;
175 	struct evcnt sc_ev_r_oversize;
176 	struct evcnt sc_ev_r_frag;
177 	struct evcnt sc_ev_r_jab;
178 	struct evcnt sc_ev_r_p64;
179 	struct evcnt sc_ev_r_p65to127;
180 	struct evcnt sc_ev_r_p128to255;
181 	struct evcnt sc_ev_r_p256to511;
182 	struct evcnt sc_ev_r_p512to1023;
183 	struct evcnt sc_ev_r_p1024to2047;
184 	struct evcnt sc_ev_r_p_gte2048;
185 	struct evcnt sc_ev_r_octets;
186 #endif /* ENET_EVENT_COUNTER */
187 };
188 
189 #define TXDESC_WRITEOUT(idx)					\
190 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap,	\
191 	    sizeof(struct enet_txdesc) * (idx),			\
192 	    sizeof(struct enet_txdesc),				\
193 	    BUS_DMASYNC_PREWRITE)
194 
195 #define TXDESC_READIN(idx)					\
196 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap,	\
197 	    sizeof(struct enet_txdesc) * (idx),			\
198 	    sizeof(struct enet_txdesc),				\
199 	    BUS_DMASYNC_PREREAD)
200 
201 #define RXDESC_WRITEOUT(idx)					\
202 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap,	\
203 	    sizeof(struct enet_rxdesc) * (idx),			\
204 	    sizeof(struct enet_rxdesc),				\
205 	    BUS_DMASYNC_PREWRITE)
206 
207 #define RXDESC_READIN(idx)					\
208 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap,	\
209 	    sizeof(struct enet_rxdesc) * (idx),			\
210 	    sizeof(struct enet_rxdesc),				\
211 	    BUS_DMASYNC_PREREAD)
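
/*
 * Each macro above syncs exactly one descriptor within the ring's DMA map:
 * the WRITEOUT variants are used before handing a descriptor to the
 * hardware, the READIN variants before the CPU examines one that the
 * hardware may have updated.
 */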
212 
213 #define ENET_REG_READ(sc, reg)					\
214 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
215 
216 #define ENET_REG_WRITE(sc, reg, value)				\
217 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value)
218 
219 static int enet_match(device_t, struct cfdata *, void *);
220 static void enet_attach(device_t, device_t, void *);
221 #ifdef ENET_EVENT_COUNTER
222 static void enet_attach_evcnt(struct enet_softc *);
223 static void enet_update_evcnt(struct enet_softc *);
224 #endif
225 
226 static int enet_intr(void *);
227 static void enet_tick(void *);
228 static int enet_tx_intr(void *);
229 static int enet_rx_intr(void *);
230 static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *,
231                          int);
232 
233 static void enet_start(struct ifnet *);
234 static int enet_ifflags_cb(struct ethercom *);
235 static int enet_ioctl(struct ifnet *, u_long, void *);
236 static int enet_init(struct ifnet *);
237 static void enet_stop(struct ifnet *, int);
238 static void enet_watchdog(struct ifnet *);
239 static void enet_mediastatus(struct ifnet *, struct ifmediareq *);
240 
241 static int enet_miibus_readreg(device_t, int, int);
242 static void enet_miibus_writereg(device_t, int, int, int);
243 static void enet_miibus_statchg(struct ifnet *);
244 
245 static void enet_ocotp_getmacaddr(uint8_t *);
246 static void enet_gethwaddr(struct enet_softc *, uint8_t *);
247 static void enet_sethwaddr(struct enet_softc *, uint8_t *);
248 static void enet_setmulti(struct enet_softc *);
249 static int enet_encap_mbufalign(struct mbuf **);
250 static int enet_encap_txring(struct enet_softc *, struct mbuf **);
251 static int enet_init_plls(struct enet_softc *);
252 static int enet_init_regs(struct enet_softc *, int);
253 static int enet_alloc_ring(struct enet_softc *);
254 static void enet_init_txring(struct enet_softc *);
255 static int enet_init_rxring(struct enet_softc *);
256 static void enet_reset_rxdesc(struct enet_softc *, int);
257 static int enet_alloc_rxbuf(struct enet_softc *, int);
258 static void enet_drain_txbuf(struct enet_softc *);
259 static void enet_drain_rxbuf(struct enet_softc *);
260 static int enet_alloc_dma(struct enet_softc *, size_t, void **,
261                           bus_dmamap_t *);
262 
263 CFATTACH_DECL_NEW(enet, sizeof(struct enet_softc),
264     enet_match, enet_attach, NULL, NULL);
265 
266 /* ARGSUSED */
267 static int
268 enet_match(device_t parent __unused, struct cfdata *match __unused, void *aux)
269 {
270 	struct axi_attach_args *aa;
271 
272 	aa = aux;
273 
274 	switch (aa->aa_addr) {
275 	case (IMX6_AIPS2_BASE + AIPS2_ENET_BASE):
276 		return 1;
277 	}
278 
279 	return 0;
280 }
281 
282 /* ARGSUSED */
283 static void
284 enet_attach(device_t parent __unused, device_t self, void *aux)
285 {
286 	struct enet_softc *sc;
287 	struct axi_attach_args *aa;
288 	struct ifnet *ifp;
289 
290 	aa = aux;
291 	sc = device_private(self);
292 	sc->sc_dev = self;
293 	sc->sc_iot = aa->aa_iot;
294 	sc->sc_addr = aa->aa_addr;
295 	sc->sc_dmat = aa->aa_dmat;
296 
297 	if (aa->aa_size == AXICF_SIZE_DEFAULT)
298 		aa->aa_size = AIPS2_ENET_SIZE;
299 
300 	aprint_naive("\n");
301 	aprint_normal(": Gigabit Ethernet Controller\n");
302 	if (bus_space_map(sc->sc_iot, sc->sc_addr, aa->aa_size, 0,
303 	    &sc->sc_ioh)) {
304 		aprint_error_dev(self, "cannot map registers\n");
305 		return;
306 	}
307 
308 	/* allocate dma buffer */
309 	if (enet_alloc_ring(sc))
310 		return;
311 
312 #define IS_ENADDR_ZERO(enaddr)				\
313 	(((enaddr)[0] | (enaddr)[1] | (enaddr)[2] |	\
314 	 (enaddr)[3] | (enaddr)[4] | (enaddr)[5]) == 0)
315 
316 	/* get mac-address from SoC eFuse */
317 	enet_ocotp_getmacaddr(sc->sc_enaddr);
318 	if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
319 		/* perhaps the bootloader has already set a MAC address? */
320 		enet_gethwaddr(sc, sc->sc_enaddr);
321 		if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
322 			/* give up; generate a random address */
323 			uint32_t addr = random();
324 			/* not multicast */
325 			sc->sc_enaddr[0] = (addr >> 24) & 0xfc;
326 			sc->sc_enaddr[1] = addr >> 16;
327 			sc->sc_enaddr[2] = addr >> 8;
328 			sc->sc_enaddr[3] = addr;
329 			addr = random();
330 			sc->sc_enaddr[4] = addr >> 8;
331 			sc->sc_enaddr[5] = addr;
332 
333 			aprint_error_dev(self,
334 			    "cannot get a MAC address; using a random one\n");
335 		}
336 	}
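	/*
	 * Note: the 0xfc mask above keeps the random address unicast; a
	 * fabricated address would conventionally also set the locally
	 * administered bit, e.g. (hypothetical):
	 *	sc->sc_enaddr[0] = ((addr >> 24) & 0xfc) | 0x02;
	 */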
337 	enet_sethwaddr(sc, sc->sc_enaddr);
338 
339 	aprint_normal_dev(self, "Ethernet address %s\n",
340 	    ether_sprintf(sc->sc_enaddr));
341 
342 	/* power up and init */
343 	if (enet_init_plls(sc) != 0)
344 		goto failure;
345 	enet_init_regs(sc, 1);
346 
347 	/* setup interrupt handlers */
348 	if ((sc->sc_ih = intr_establish(aa->aa_irq, IPL_NET,
349 	    IST_EDGE, enet_intr, sc)) == NULL) {
350 		aprint_error_dev(self, "unable to establish interrupt\n");
351 		goto failure;
352 	}
353 
354 	/* setup ifp */
355 	ifp = &sc->sc_ethercom.ec_if;
356 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
357 	ifp->if_softc = sc;
358 	ifp->if_mtu = ETHERMTU;
359 	ifp->if_baudrate = IF_Gbps(1);
360 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
361 	ifp->if_ioctl = enet_ioctl;
362 	ifp->if_start = enet_start;
363 	ifp->if_init = enet_init;
364 	ifp->if_stop = enet_stop;
365 	ifp->if_watchdog = enet_watchdog;
366 
367 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
368 #ifdef ENET_SUPPORT_JUMBO
369 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
370 #endif
371 
372 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
373 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
374 	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
375 	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx |
376 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
377 
378 	IFQ_SET_MAXLEN(&ifp->if_snd, max(ENET_TX_RING_CNT, IFQ_MAXLEN));
379 	IFQ_SET_READY(&ifp->if_snd);
380 
381 	/* setup MII */
382 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
383 	sc->sc_mii.mii_ifp = ifp;
384 	sc->sc_mii.mii_readreg = enet_miibus_readreg;
385 	sc->sc_mii.mii_writereg = enet_miibus_writereg;
386 	sc->sc_mii.mii_statchg = enet_miibus_statchg;
387 	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
388 	    enet_mediastatus);
389 
390 	/* try to attach PHY */
391 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
392 	    MII_OFFSET_ANY, 0);
393 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
394 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
395 		    0, NULL);
396 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
397 	} else {
398 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
399 	}
400 
401 	if_attach(ifp);
402 	ether_ifattach(ifp, sc->sc_enaddr);
403 	ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb);
404 
405 	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
406 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
407 
408 #ifdef ENET_EVENT_COUNTER
409 	enet_attach_evcnt(sc);
410 #endif
411 
412 	sc->sc_stopping = false;
413 	callout_init(&sc->sc_tick_ch, 0);
414 	callout_setfunc(&sc->sc_tick_ch, enet_tick, sc);
415 	callout_schedule(&sc->sc_tick_ch, hz);
416 
417 	return;
418 
419  failure:
420 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, aa->aa_size);
421 	return;
422 }
423 
424 #ifdef ENET_EVENT_COUNTER
425 static void
426 enet_attach_evcnt(struct enet_softc *sc)
427 {
428 	const char *xname;
429 
430 	xname = device_xname(sc->sc_dev);
431 
432 #define ENET_EVCNT_ATTACH(name)	\
433 	evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC,	\
434 	    NULL, xname, #name);
435 
436 	ENET_EVCNT_ATTACH(t_drop);
437 	ENET_EVCNT_ATTACH(t_packets);
438 	ENET_EVCNT_ATTACH(t_bc_pkt);
439 	ENET_EVCNT_ATTACH(t_mc_pkt);
440 	ENET_EVCNT_ATTACH(t_crc_align);
441 	ENET_EVCNT_ATTACH(t_undersize);
442 	ENET_EVCNT_ATTACH(t_oversize);
443 	ENET_EVCNT_ATTACH(t_frag);
444 	ENET_EVCNT_ATTACH(t_jab);
445 	ENET_EVCNT_ATTACH(t_col);
446 	ENET_EVCNT_ATTACH(t_p64);
447 	ENET_EVCNT_ATTACH(t_p65to127n);
448 	ENET_EVCNT_ATTACH(t_p128to255n);
449 	ENET_EVCNT_ATTACH(t_p256to511);
450 	ENET_EVCNT_ATTACH(t_p512to1023);
451 	ENET_EVCNT_ATTACH(t_p1024to2047);
452 	ENET_EVCNT_ATTACH(t_p_gte2048);
453 	ENET_EVCNT_ATTACH(t_octets);
454 	ENET_EVCNT_ATTACH(r_packets);
455 	ENET_EVCNT_ATTACH(r_bc_pkt);
456 	ENET_EVCNT_ATTACH(r_mc_pkt);
457 	ENET_EVCNT_ATTACH(r_crc_align);
458 	ENET_EVCNT_ATTACH(r_undersize);
459 	ENET_EVCNT_ATTACH(r_oversize);
460 	ENET_EVCNT_ATTACH(r_frag);
461 	ENET_EVCNT_ATTACH(r_jab);
462 	ENET_EVCNT_ATTACH(r_p64);
463 	ENET_EVCNT_ATTACH(r_p65to127);
464 	ENET_EVCNT_ATTACH(r_p128to255);
465 	ENET_EVCNT_ATTACH(r_p256to511);
466 	ENET_EVCNT_ATTACH(r_p512to1023);
467 	ENET_EVCNT_ATTACH(r_p1024to2047);
468 	ENET_EVCNT_ATTACH(r_p_gte2048);
469 	ENET_EVCNT_ATTACH(r_octets);
470 }
471 
472 static void
473 enet_update_evcnt(struct enet_softc *sc)
474 {
475 	sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP);
476 	sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS);
477 	sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT);
478 	sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT);
479 	sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN);
480 	sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE);
481 	sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE);
482 	sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG);
483 	sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB);
484 	sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL);
485 	sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64);
486 	sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N);
487 	sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N);
488 	sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511);
489 	sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023);
490 	sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047);
491 	sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048);
492 	sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS);
493 	sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS);
494 	sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT);
495 	sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT);
496 	sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN);
497 	sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE);
498 	sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE);
499 	sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG);
500 	sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB);
501 	sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64);
502 	sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127);
503 	sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255);
504 	sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511);
505 	sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023);
506 	sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047);
507 	sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048);
508 	sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS);
509 }
510 #endif /* ENET_EVENT_COUNTER */
511 
512 static void
513 enet_tick(void *arg)
514 {
515 	struct enet_softc *sc;
516 	struct mii_data *mii;
517 	struct ifnet *ifp;
518 	int s;
519 
520 	sc = arg;
521 	mii = &sc->sc_mii;
522 	ifp = &sc->sc_ethercom.ec_if;
523 
524 	s = splnet();
525 
526 	if (sc->sc_stopping)
527 		goto out;
528 
529 
530 #ifdef ENET_EVENT_COUNTER
531 	enet_update_evcnt(sc);
532 #endif
533 
534 	/* update counters */
535 	ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE);
536 	ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_FRAG);
537 	ifp->if_ierrors += ENET_REG_READ(sc, ENET_RMON_R_JAB);
538 
539 	/* clear counters */
540 	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
541 	ENET_REG_WRITE(sc, ENET_MIBC, 0);
542 
543 	mii_tick(mii);
544  out:
545 
546 	if (!sc->sc_stopping)
547 		callout_schedule(&sc->sc_tick_ch, hz);
548 
549 	splx(s);
550 }
551 
552 static int
553 enet_intr(void *arg)
554 {
555 	struct enet_softc *sc;
556 	struct ifnet *ifp;
557 	uint32_t status;
558 
559 	sc = arg;
560 	status = ENET_REG_READ(sc, ENET_EIR);
561 
562 	if (status & ENET_EIR_TXF)
563 		enet_tx_intr(arg);
564 
565 	if (status & ENET_EIR_RXF)
566 		enet_rx_intr(arg);
567 
568 	if (status & ENET_EIR_EBERR) {
569 		device_printf(sc->sc_dev, "Ethernet Bus Error\n");
570 		ifp = &sc->sc_ethercom.ec_if;
571 		enet_stop(ifp, 1);
572 		enet_init(ifp);
573 	} else {
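		/* ENET_EIR is write-1-to-clear; acknowledge what was handled */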
574 		ENET_REG_WRITE(sc, ENET_EIR, status);
575 	}
576 
577 	rnd_add_uint32(&sc->sc_rnd_source, status);
578 
579 	return 1;
580 }
581 
582 static int
583 enet_tx_intr(void *arg)
584 {
585 	struct enet_softc *sc;
586 	struct ifnet *ifp;
587 	struct enet_txsoft *txs;
588 	int idx;
589 
590 	sc = (struct enet_softc *)arg;
591 	ifp = &sc->sc_ethercom.ec_if;
592 
593 	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
594 	    idx = ENET_TX_NEXTIDX(idx)) {
595 
596 		txs = &sc->sc_txsoft[idx];
597 
598 		TXDESC_READIN(idx);
599 		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) {
600 			/* This TX Descriptor has not been transmitted yet */
601 			break;
602 		}
603 
604 		/* txsoft is valid only for the first segment (TXFLAGS1_T1) */
605 		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
606 			bus_dmamap_unload(sc->sc_dmat,
607 			    txs->txs_dmamap);
608 			m_freem(txs->txs_mbuf);
609 			ifp->if_opackets++;
610 		}
611 
612 		/* check for transmit errors */
613 		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) {
614 			uint32_t flags2;
615 
616 			flags2 = sc->sc_txdesc_ring[idx].tx_flags2;
617 
618 			if (flags2 & (TXFLAGS2_TXE |
619 			    TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE |
620 			    TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) {
621 #ifdef DEBUG_ENET
622 				if (enet_debug) {
623 					char flagsbuf[128];
624 
625 					snprintb(flagsbuf, sizeof(flagsbuf),
626 					    "\20" "\20TRANSMIT" "\16UNDERFLOW"
627 					    "\15COLLISION" "\14FRAME"
628 					    "\13LATECOLLISION" "\12OVERFLOW",
629 					    flags2);
630 
631 					device_printf(sc->sc_dev,
632 					    "txdesc[%d]: transmit error: "
633 					    "flags2=%s\n", idx, flagsbuf);
634 				}
635 #endif /* DEBUG_ENET */
636 				ifp->if_oerrors++;
637 			}
638 		}
639 
640 		sc->sc_tx_free++;
641 	}
642 	sc->sc_tx_considx = idx;
643 
644 	if (sc->sc_tx_free > 0)
645 		ifp->if_flags &= ~IFF_OACTIVE;
646 
647 	/*
648 	 * No more pending TX descriptors;
649 	 * cancel the watchdog timer.
650 	 */
651 	if (sc->sc_tx_free == ENET_TX_RING_CNT)
652 		ifp->if_timer = 0;
653 
654 	return 1;
655 }
656 
657 static int
658 enet_rx_intr(void *arg)
659 {
660 	struct enet_softc *sc;
661 	struct ifnet *ifp;
662 	struct enet_rxsoft *rxs;
663 	int idx, len, amount;
664 	uint32_t flags1, flags2;
665 	struct mbuf *m, *m0, *mprev;
666 
667 	sc = arg;
668 	ifp = &sc->sc_ethercom.ec_if;
669 
670 	m0 = mprev = NULL;
671 	amount = 0;
672 	for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) {
673 
674 		rxs = &sc->sc_rxsoft[idx];
675 
676 		RXDESC_READIN(idx);
677 		if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) {
678 			/* this RX descriptor has not received a frame yet */
679 			break;
680 		}
681 
682 		/*
683 		 * build mbuf from RX Descriptor if needed
684 		 */
685 		m = rxs->rxs_mbuf;
686 		rxs->rxs_mbuf = NULL;
687 
688 		flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len;
689 		len = RXFLAGS1_LEN(flags1);
690 
691 #define RACC_SHIFT16	2
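		/*
		 * With RACC[SHIFT16] set (see enet_init_regs()), the
		 * controller prepends two bytes of padding to each received
		 * frame so the IP header is 32-bit aligned; trim them from
		 * the first fragment and from the reported frame length.
		 */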
692 		if (m0 == NULL) {
693 			m0 = m;
694 			m_adj(m0, RACC_SHIFT16);
695 			len -= RACC_SHIFT16;
696 			m->m_len = len;
697 			amount = len;
698 		} else {
699 			if (flags1 & RXFLAGS1_L)
700 				len = len - amount - RACC_SHIFT16;
701 
702 			m->m_len = len;
703 			amount += len;
704 			m->m_flags &= ~M_PKTHDR;
705 			mprev->m_next = m;
706 		}
707 		mprev = m;
708 
709 		flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;
710 
711 		if (flags1 & RXFLAGS1_L) {
712 			/* last buffer */
713 			if ((amount < ETHER_HDR_LEN) ||
714 			    ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO |
715 			    RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) ||
716 			    (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE |
717 			    RXFLAGS2_CE)))) {
718 
719 #ifdef DEBUG_ENET
720 				if (enet_debug) {
721 					char flags1buf[128], flags2buf[128];
722 					snprintb(flags1buf, sizeof(flags1buf),
723 					    "\20" "\31MISS" "\26LENGTHVIOLATION"
724 					    "\25NONOCTET" "\23CRC" "\22OVERRUN"
725 					    "\21TRUNCATED", flags1);
726 					snprintb(flags2buf, sizeof(flags2buf),
727 					    "\20" "\40MAC" "\33PHY"
728 					    "\32COLLISION", flags2);
729 
730 					DEVICE_DPRINTF(
731 					    "rxdesc[%d]: receive error: "
732 					    "flags1=%s,flags2=%s,len=%d\n",
733 					    idx, flags1buf, flags2buf, amount);
734 				}
735 #endif /* DEBUG_ENET */
736 				ifp->if_ierrors++;
737 				m_freem(m0);
738 
739 			} else {
740 				/* packet received OK */
741 				ifp->if_ipackets++;
742 				m0->m_pkthdr.rcvif = ifp;
743 				m0->m_pkthdr.len = amount;
744 
745 				bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
746 				    rxs->rxs_dmamap->dm_mapsize,
747 				    BUS_DMASYNC_PREREAD);
748 
749 				if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 |
750 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
751 				    M_CSUM_TCPv6 | M_CSUM_UDPv6))
752 					enet_rx_csum(sc, ifp, m0, idx);
753 
754 				/* Pass this up to any BPF listeners */
755 				bpf_mtap(ifp, m0);
756 
757 				(*ifp->if_input)(ifp, m0);
758 			}
759 
760 			m0 = NULL;
761 			mprev = NULL;
762 			amount = 0;
763 
764 		} else {
765 			/* continued from previous buffer */
766 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
767 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
768 		}
769 
770 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
771 		if (enet_alloc_rxbuf(sc, idx) != 0) {
772 			panic("enet_alloc_rxbuf failed");
773 		}
774 	}
775 	sc->sc_rx_readidx = idx;
776 
777 	/* re-enable RX DMA now that descriptors have been replenished */
778 	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);
779 
780 	return 1;
781 }
782 
783 static void
784 enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx)
785 {
786 	uint32_t flags2;
787 	uint8_t proto;
788 
789 	flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;
790 
791 	if (flags2 & RXFLAGS2_IPV6) {
792 		proto = sc->sc_rxdesc_ring[idx].rx_proto;
793 
794 		/* RXFLAGS2_PCR is valid only for IPv6 TCP/UDP packets */
795 		if ((proto == IPPROTO_TCP) &&
796 		    (ifp->if_csum_flags_rx & M_CSUM_TCPv6))
797 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
798 		else if ((proto == IPPROTO_UDP) &&
799 		    (ifp->if_csum_flags_rx & M_CSUM_UDPv6))
800 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
801 		else
802 			return;
803 
804 		/* IPv6 protocol checksum error */
805 		if (flags2 & RXFLAGS2_PCR)
806 			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
807 
808 	} else {
809 		struct ether_header *eh;
810 		uint8_t *ip;
811 
812 		eh = mtod(m, struct ether_header *);
813 
814 		/* XXX: is this an IPv4 packet? */
815 		if (ntohs(eh->ether_type) != ETHERTYPE_IP)
816 			return;
817 		ip = (uint8_t *)(eh + 1);
818 		if ((ip[0] & 0xf0) != 0x40)
819 			return;
820 
821 		proto = sc->sc_rxdesc_ring[idx].rx_proto;
822 		if (flags2 & RXFLAGS2_ICE) {
823 			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
824 				m->m_pkthdr.csum_flags |=
825 				    M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
826 			}
827 		} else {
828 			if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
829 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
830 			}
831 
832 			/*
833 			 * PCR is valid when
834 			 * ICE == 0 and FRAG == 0
835 			 */
836 			if (flags2 & RXFLAGS2_FRAG)
837 				return;
838 
839 			/*
840 			 * PCR is valid when proto is TCP or UDP
841 			 */
842 			if ((proto == IPPROTO_TCP) &&
843 			    (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
844 				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
845 			else if ((proto == IPPROTO_UDP) &&
846 			    (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
847 				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
848 			else
849 				return;
850 
851 			/* IPv4 protocol checksum error */
852 			if (flags2 & RXFLAGS2_PCR)
853 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
854 		}
855 	}
856 }
857 
858 static void
859 enet_setmulti(struct enet_softc *sc)
860 {
861 	struct ifnet *ifp;
862 	struct ether_multi *enm;
863 	struct ether_multistep step;
864 	int promisc;
865 	uint32_t crc;
866 	uint32_t gaddr[2];
867 
868 	ifp = &sc->sc_ethercom.ec_if;
869 
870 	promisc = 0;
871 	if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
872 		ifp->if_flags |= IFF_ALLMULTI;
873 		if (ifp->if_flags & IFF_PROMISC)
874 			promisc = 1;
875 		gaddr[0] = gaddr[1] = 0xffffffff;
876 	} else {
877 		gaddr[0] = gaddr[1] = 0;
878 
879 		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
880 		while (enm != NULL) {
881 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
882 			gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
883 			ETHER_NEXT_MULTI(step, enm);
884 		}
885 	}
886 
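	/*
	 * The 64-bit group hash uses the top 6 bits of the little-endian
	 * CRC32 of each address: CRC bit 31 selects the 32-bit half and
	 * bits 30:26 select the bit within that half.
	 */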
887 	ENET_REG_WRITE(sc, ENET_GAUR, gaddr[0]);
888 	ENET_REG_WRITE(sc, ENET_GALR, gaddr[1]);
889 
890 	if (promisc) {
891 		/* match all packets */
892 		ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff);
893 		ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff);
894 	} else {
895 		/* don't match any packets */
896 		ENET_REG_WRITE(sc, ENET_IAUR, 0);
897 		ENET_REG_WRITE(sc, ENET_IALR, 0);
898 	}
899 }
900 
901 static void
902 enet_ocotp_getmacaddr(uint8_t *macaddr)
903 {
904 #if NIMXOCOTP > 0
905 	uint32_t addr;
906 
907 	addr = imxocotp_read(OCOTP_MAC1);
908 	macaddr[0] = addr >> 8;
909 	macaddr[1] = addr;
910 
911 	addr = imxocotp_read(OCOTP_MAC0);
912 	macaddr[2] = addr >> 24;
913 	macaddr[3] = addr >> 16;
914 	macaddr[4] = addr >> 8;
915 	macaddr[5] = addr;
916 #endif
917 }
918 
919 static void
920 enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
921 {
922 	uint32_t paddr;
923 
924 	paddr = ENET_REG_READ(sc, ENET_PALR);
925 	hwaddr[0] = paddr >> 24;
926 	hwaddr[1] = paddr >> 16;
927 	hwaddr[2] = paddr >> 8;
928 	hwaddr[3] = paddr;
929 
930 	paddr = ENET_REG_READ(sc, ENET_PAUR);
931 	hwaddr[4] = paddr >> 24;
932 	hwaddr[5] = paddr >> 16;
933 }
934 
935 static void
936 enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
937 {
938 	uint32_t paddr;
939 
940 	paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) |
941 	    hwaddr[3];
942 	ENET_REG_WRITE(sc, ENET_PALR, paddr);
943 	paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16);
944 	ENET_REG_WRITE(sc, ENET_PAUR, paddr);
945 }
946 
947 /*
948  * ifnet interfaces
949  */
950 static int
951 enet_init(struct ifnet *ifp)
952 {
953 	struct enet_softc *sc;
954 	int s, error;
955 
956 	sc = ifp->if_softc;
957 
958 	s = splnet();
959 
960 	enet_init_regs(sc, 0);
961 	enet_init_txring(sc);
962 	error = enet_init_rxring(sc);
963 	if (error != 0) {
964 		enet_drain_rxbuf(sc);
965 		device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n");
966 		goto init_failure;
967 	}
968 
969 	/* reload mac address */
970 	memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
971 	enet_sethwaddr(sc, sc->sc_enaddr);
972 
973 	/* program multicast address */
974 	enet_setmulti(sc);
975 
976 	/* update if_flags */
977 	ifp->if_flags |= IFF_RUNNING;
978 	ifp->if_flags &= ~IFF_OACTIVE;
979 
980 	/* update local copy of if_flags */
981 	sc->sc_if_flags = ifp->if_flags;
982 
983 	/* mii */
984 	mii_mediachg(&sc->sc_mii);
985 
986 	/* enable RX DMA */
987 	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);
988 
989 	sc->sc_stopping = false;
990 
991  init_failure:
992 	splx(s);
993 
994 	return error;
995 }
996 
997 static void
998 enet_start(struct ifnet *ifp)
999 {
1000 	struct enet_softc *sc;
1001 	struct mbuf *m;
1002 	int npkt;
1003 
1004 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1005 		return;
1006 
1007 	sc = ifp->if_softc;
1008 	for (npkt = 0; ; npkt++) {
1009 		IFQ_POLL(&ifp->if_snd, m);
1010 		if (m == NULL)
1011 			break;
1012 
1013 		if (sc->sc_tx_free <= 0) {
1014 			/* no TX descriptors available now... */
1015 			ifp->if_flags |= IFF_OACTIVE;
1016 			DEVICE_DPRINTF("TX descriptor ring is full\n");
1017 			break;
1018 		}
1019 
1020 		IFQ_DEQUEUE(&ifp->if_snd, m);
1021 
1022 		if (enet_encap_txring(sc, &m) != 0) {
1023 			/* could not align chain, or too many segments? */
1024 			ifp->if_flags |= IFF_OACTIVE;
1025 			DEVICE_DPRINTF(
1026 			    "TX descriptor ring is full; dropping packet\n");
1027 			/* chain already freed by enet_encap_txring() */
1028 			ifp->if_oerrors++;
1029 			break;
1030 		}
1031 
1032 		/* Pass the packet to any BPF listeners */
1033 		bpf_mtap(ifp, m);
1034 	}
1035 
1036 	if (npkt) {
1037 		/* enable TX DMA */
1038 		ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE);
1039 
1040 		ifp->if_timer = 5;
1041 	}
1042 }
1043 
1044 static void
1045 enet_stop(struct ifnet *ifp, int disable)
1046 {
1047 	struct enet_softc *sc;
1048 	int s;
1049 	uint32_t v;
1050 
1051 	sc = ifp->if_softc;
1052 
1053 	s = splnet();
1054 
1055 	sc->sc_stopping = true;
1056 	callout_stop(&sc->sc_tick_ch);
1057 
1058 	/* clear ENET_ECR[ETHEREN] to abort receive and transmit */
1059 	v = ENET_REG_READ(sc, ENET_ECR);
1060 	ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN);
1061 
1062 	/* Mark the interface as down and cancel the watchdog timer. */
1063 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1064 	ifp->if_timer = 0;
1065 
1066 	if (disable) {
1067 		enet_drain_txbuf(sc);
1068 		enet_drain_rxbuf(sc);
1069 	}
1070 
1071 	splx(s);
1072 }
1073 
1074 static void
1075 enet_watchdog(struct ifnet *ifp)
1076 {
1077 	struct enet_softc *sc;
1078 	int s;
1079 
1080 	sc = ifp->if_softc;
1081 	s = splnet();
1082 
1083 	device_printf(sc->sc_dev, "watchdog timeout\n");
1084 	ifp->if_oerrors++;
1085 
1086 	/* salvage packets left in descriptors */
1087 	enet_tx_intr(sc);
1088 	enet_rx_intr(sc);
1089 
1090 	/* reset */
1091 	enet_stop(ifp, 1);
1092 	enet_init(ifp);
1093 
1094 	splx(s);
1095 }
1096 
1097 static void
1098 enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1099 {
1100 	struct enet_softc *sc = ifp->if_softc;
1101 
1102 	ether_mediastatus(ifp, ifmr);
1103 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
1104 	    | sc->sc_flowflags;
1105 }
1106 
1107 static int
1108 enet_ifflags_cb(struct ethercom *ec)
1109 {
1110 	struct ifnet *ifp = &ec->ec_if;
1111 	struct enet_softc *sc = ifp->if_softc;
1112 	int change = ifp->if_flags ^ sc->sc_if_flags;
1113 
1114 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
1115 		return ENETRESET;
1116 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
1117 		return 0;
1118 
1119 	enet_setmulti(sc);
1120 
1121 	sc->sc_if_flags = ifp->if_flags;
1122 	return 0;
1123 }
1124 
1125 static int
1126 enet_ioctl(struct ifnet *ifp, u_long command, void *data)
1127 {
1128 	struct enet_softc *sc;
1129 	struct ifreq *ifr;
1130 	int s, error;
1131 	uint32_t v;
1132 
1133 	sc = ifp->if_softc;
1134 	ifr = data;
1135 
1136 	error = 0;
1137 
1138 	s = splnet();
1139 
1140 	switch (command) {
1141 	case SIOCSIFMTU:
1142 		if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) {
1143 			error = EINVAL;
1144 		} else {
1145 			ifp->if_mtu = ifr->ifr_mtu;
1146 
1147 			/* set maximum frame length */
1148 			v = MTU2FRAMESIZE(ifr->ifr_mtu);
1149 			ENET_REG_WRITE(sc, ENET_FTRL, v);
1150 			v = ENET_REG_READ(sc, ENET_RCR);
1151 			v &= ~ENET_RCR_MAX_FL(0x3fff);
1152 			v |= ENET_RCR_MAX_FL(ifp->if_mtu +
1153 			    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
1154 			ENET_REG_WRITE(sc, ENET_RCR, v);
1155 		}
1156 		break;
1157 	case SIOCSIFMEDIA:
1158 	case SIOCGIFMEDIA:
1159 		/* Flow control requires full-duplex mode. */
1160 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1161 		    (ifr->ifr_media & IFM_FDX) == 0)
1162 			ifr->ifr_media &= ~IFM_ETH_FMASK;
1163 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1164 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1165 				/* We can do both TXPAUSE and RXPAUSE. */
1166 				ifr->ifr_media |=
1167 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1168 			}
1169 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1170 		}
1171 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1172 		break;
1173 	default:
1174 		error = ether_ioctl(ifp, command, data);
1175 		if (error != ENETRESET)
1176 			break;
1177 
1178 		/* post-process */
1179 		error = 0;
1180 		switch (command) {
1181 		case SIOCSIFCAP:
1182 			error = (*ifp->if_init)(ifp);
1183 			break;
1184 		case SIOCADDMULTI:
1185 		case SIOCDELMULTI:
1186 			if (ifp->if_flags & IFF_RUNNING)
1187 				enet_setmulti(sc);
1188 			break;
1189 		}
1190 		break;
1191 	}
1192 
1193 	splx(s);
1194 
1195 	return error;
1196 }
1197 
1198 /*
1199  * for MII
1200  */
1201 static int
1202 enet_miibus_readreg(device_t dev, int phy, int reg)
1203 {
1204 	struct enet_softc *sc;
1205 	int timeout;
1206 	uint32_t val, status;
1207 
1208 	sc = device_private(dev);
1209 
1210 	/* clear MII update */
1211 	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);
1212 
1213 	/* read command */
1214 	ENET_REG_WRITE(sc, ENET_MMFR,
1215 	    ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA |
1216 	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy));
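
	/*
	 * Writing ENET_MMFR emits one IEEE 802.3 clause-22 management frame
	 * on the MDIO bus (start, opcode, PHY address, register address,
	 * turnaround, data); completion is signalled by EIR[MII].
	 */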
1217 
1218 	/* check MII update */
1219 	for (timeout = 5000; timeout > 0; --timeout) {
1220 		status = ENET_REG_READ(sc, ENET_EIR);
1221 		if (status & ENET_EIR_MII)
1222 			break;
1223 	}
1224 	if (timeout <= 0) {
1225 		DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n",
1226 		    reg);
1227 		val = -1;
1228 	} else {
1229 		val = ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK;
1230 	}
1231 
1232 	return val;
1233 }
1234 
1235 static void
1236 enet_miibus_writereg(device_t dev, int phy, int reg, int val)
1237 {
1238 	struct enet_softc *sc;
1239 	int timeout;
1240 
1241 	sc = device_private(dev);
1242 
1243 	/* clear MII update */
1244 	ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII);
1245 
1246 	/* write command */
1247 	ENET_REG_WRITE(sc, ENET_MMFR,
1248 	    ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA |
1249 	    ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) |
1250 	    (ENET_MMFR_DATAMASK & val));
1251 
1252 	/* check MII update */
1253 	for (timeout = 5000; timeout > 0; --timeout) {
1254 		if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII)
1255 			break;
1256 	}
1257 	if (timeout <= 0) {
1258 		DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n",
1259 		    reg);
1260 	}
1261 }
1262 
1263 static void
1264 enet_miibus_statchg(struct ifnet *ifp)
1265 {
1266 	struct enet_softc *sc;
1267 	struct mii_data *mii;
1268 	struct ifmedia_entry *ife;
1269 	uint32_t ecr, ecr0;
1270 	uint32_t rcr, rcr0;
1271 	uint32_t tcr, tcr0;
1272 
1273 	sc = ifp->if_softc;
1274 	mii = &sc->sc_mii;
1275 	ife = mii->mii_media.ifm_cur;
1276 
1277 	/* get current status */
1278 	ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET;
1279 	rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR);
1280 	tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR);
1281 
1282 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1283 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
1284 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1285 		mii->mii_media_active &= ~IFM_ETH_FMASK;
1286 	}
1287 
1288 	if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
1289 		tcr |= ENET_TCR_FDEN;	/* full duplex */
1290 		rcr &= ~ENET_RCR_DRT;	/* enable receive on transmit */
1291 	} else {
1292 		tcr &= ~ENET_TCR_FDEN;	/* half duplex */
1293 		rcr |= ENET_RCR_DRT;	/* disable receive on transmit */
1294 	}
1295 
1296 	if ((tcr ^ tcr0) & ENET_TCR_FDEN) {
1297 		/*
1298 		 * need to reset because FDEN can only
1299 		 * be changed while ECR[ETHEREN] is 0
1300 		 */
1301 		enet_init_regs(sc, 0);
1302 		return;
1303 	}
1304 
1305 	switch (IFM_SUBTYPE(ife->ifm_media)) {
1306 	case IFM_AUTO:
1307 	case IFM_1000_T:
1308 		ecr |= ENET_ECR_SPEED;		/* 1000Mbps mode */
1309 		break;
1310 	case IFM_100_TX:
1311 		ecr &= ~ENET_ECR_SPEED;		/* 100Mbps mode */
1312 		rcr &= ~ENET_RCR_RMII_10T;	/* 100Mbps mode */
1313 		break;
1314 	case IFM_10_T:
1315 		ecr &= ~ENET_ECR_SPEED;		/* 10Mbps mode */
1316 		rcr |= ENET_RCR_RMII_10T;	/* 10Mbps mode */
1317 		break;
1318 	default:
1319 		ecr = ecr0;
1320 		rcr = rcr0;
1321 		tcr = tcr0;
1322 		break;
1323 	}
1324 
1325 	if (sc->sc_flowflags & IFM_FLOW)
1326 		rcr |= ENET_RCR_FCE;
1327 	else
1328 		rcr &= ~ENET_RCR_FCE;
1329 
1330 	/* update registers only if they changed */
1331 	if (ecr != ecr0)
1332 		ENET_REG_WRITE(sc, ENET_ECR, ecr);
1333 	if (rcr != rcr0)
1334 		ENET_REG_WRITE(sc, ENET_RCR, rcr);
1335 	if (tcr != tcr0)
1336 		ENET_REG_WRITE(sc, ENET_TCR, tcr);
1337 }
1338 
1339 /*
1340  * handling descriptors
1341  */
1342 static void
1343 enet_init_txring(struct enet_softc *sc)
1344 {
1345 	int i;
1346 
1347 	/* build TX ring */
1348 	for (i = 0; i < ENET_TX_RING_CNT; i++) {
1349 		sc->sc_txdesc_ring[i].tx_flags1_len =
1350 		    ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0);
1351 		sc->sc_txdesc_ring[i].tx_databuf = 0;
1352 		sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT;
1353 		sc->sc_txdesc_ring[i].tx__reserved1 = 0;
1354 		sc->sc_txdesc_ring[i].tx_flags3 = 0;
1355 		sc->sc_txdesc_ring[i].tx_1588timestamp = 0;
1356 		sc->sc_txdesc_ring[i].tx__reserved2 = 0;
1357 		sc->sc_txdesc_ring[i].tx__reserved3 = 0;
1358 
1359 		TXDESC_WRITEOUT(i);
1360 	}
1361 
1362 	sc->sc_tx_free = ENET_TX_RING_CNT;
1363 	sc->sc_tx_considx = 0;
1364 	sc->sc_tx_prodidx = 0;
1365 }
1366 
1367 static int
1368 enet_init_rxring(struct enet_softc *sc)
1369 {
1370 	int i, error;
1371 
1372 	/* build RX ring */
1373 	for (i = 0; i < ENET_RX_RING_CNT; i++) {
1374 		error = enet_alloc_rxbuf(sc, i);
1375 		if (error != 0)
1376 			return error;
1377 	}
1378 
1379 	sc->sc_rx_readidx = 0;
1380 
1381 	return 0;
1382 }
1383 
1384 static int
1385 enet_alloc_rxbuf(struct enet_softc *sc, int idx)
1386 {
1387 	struct mbuf *m;
1388 	int error;
1389 
1390 	KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT));
1391 
1392 	/* free mbuf if already allocated */
1393 	if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) {
1394 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap);
1395 		m_freem(sc->sc_rxsoft[idx].rxs_mbuf);
1396 		sc->sc_rxsoft[idx].rxs_mbuf = NULL;
1397 	}
1398 
1399 	/* allocate new mbuf cluster */
1400 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1401 	if (m == NULL)
1402 		return ENOBUFS;
1403 	MCLGET(m, M_DONTWAIT);
1404 	if (!(m->m_flags & M_EXT)) {
1405 		m_freem(m);
1406 		return ENOBUFS;
1407 	}
1408 	m->m_len = MCLBYTES;
1409 	m->m_next = NULL;
1410 
1411 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap,
1412 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1413 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
1414 	if (error)
1415 		return error;
1416 
1417 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0,
1418 	    sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize,
1419 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1420 
1421 	sc->sc_rxsoft[idx].rxs_mbuf = m;
1422 	enet_reset_rxdesc(sc, idx);
1423 	return 0;
1424 }
1425 
1426 static void
1427 enet_reset_rxdesc(struct enet_softc *sc, int idx)
1428 {
1429 	uint32_t paddr;
1430 
1431 	paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr;
1432 
1433 	sc->sc_rxdesc_ring[idx].rx_flags1_len =
1434 	    RXFLAGS1_E |
1435 	    ((idx == (ENET_RX_RING_CNT - 1)) ? RXFLAGS1_W : 0);
1436 	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
1437 	sc->sc_rxdesc_ring[idx].rx_flags2 =
1438 	    RXFLAGS2_INT;
1439 	sc->sc_rxdesc_ring[idx].rx_hl = 0;
1440 	sc->sc_rxdesc_ring[idx].rx_proto = 0;
1441 	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
1442 	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
1443 	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
1444 	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
1445 	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;
1446 
1447 	RXDESC_WRITEOUT(idx);
1448 }
1449 
1450 static void
1451 enet_drain_txbuf(struct enet_softc *sc)
1452 {
1453 	int idx;
1454 	struct enet_txsoft *txs;
1455 	struct ifnet *ifp;
1456 
1457 	ifp = &sc->sc_ethercom.ec_if;
1458 
1459 	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
1460 	    idx = ENET_TX_NEXTIDX(idx)) {
1461 
1462 		/* txsoft[] is used only for the first segment */
1463 		txs = &sc->sc_txsoft[idx];
1464 		TXDESC_READIN(idx);
1465 		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
1466 			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
1467 			bus_dmamap_unload(sc->sc_dmat,
1468 			    txs->txs_dmamap);
1469 			m_freem(txs->txs_mbuf);
1470 
1471 			ifp->if_oerrors++;
1472 		}
1473 		sc->sc_tx_free++;
1474 	}
1475 }
1476 
1477 static void
1478 enet_drain_rxbuf(struct enet_softc *sc)
1479 {
1480 	int i;
1481 
1482 	for (i = 0; i < ENET_RX_RING_CNT; i++) {
1483 		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
1484 			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
1485 			bus_dmamap_unload(sc->sc_dmat,
1486 			    sc->sc_rxsoft[i].rxs_dmamap);
1487 			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
1488 			sc->sc_rxsoft[i].rxs_mbuf = NULL;
1489 		}
1490 	}
1491 }
1492 
1493 static int
1494 enet_alloc_ring(struct enet_softc *sc)
1495 {
1496 	int i, error;
1497 
1498 	/*
1499 	 * build DMA maps for TX.
1500 	 * a TX DMA map must be able to hold a whole mbuf chain,
1501 	 * so create each map with up to ENET_MAX_PKT_NSEGS segments.
1502 	 */
1503 	for (i = 0; i < ENET_TX_RING_CNT; i++) {
1504 		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
1505 		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
1506 		    &sc->sc_txsoft[i].txs_dmamap);
1507 
1508 		if (error) {
1509 			aprint_error_dev(sc->sc_dev,
1510 			    "can't create DMA map for TX descs\n");
1511 			goto fail_1;
1512 		}
1513 	}
1514 
1515 	/*
1516 	 * build DMA maps for RX.
1517 	 * each RX descriptor holds one mbuf cluster,
1518 	 * so a single-segment DMA map suffices.
1519 	 */
1520 	for (i = 0; i < ENET_RX_RING_CNT; i++) {
1521 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1522 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
1523 		    &sc->sc_rxsoft[i].rxs_dmamap);
1524 		if (error) {
1525 			aprint_error_dev(sc->sc_dev,
1526 			    "can't create DMA map for RX descs\n");
1527 			goto fail_2;
1528 		}
1529 	}
1530 
1531 	if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT,
1532 	    (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0)
1533 		return -1;
1534 	memset(sc->sc_txdesc_ring, 0,
1535 	    sizeof(struct enet_txdesc) * ENET_TX_RING_CNT);
1536 
1537 	if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT,
1538 	    (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0)
1539 		return -1;
1540 	memset(sc->sc_rxdesc_ring, 0,
1541 	    sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT);
1542 
1543 	return 0;
1544 
1545  fail_2:
1546 	for (i = 0; i < ENET_RX_RING_CNT; i++) {
1547 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1548 			bus_dmamap_destroy(sc->sc_dmat,
1549 			    sc->sc_rxsoft[i].rxs_dmamap);
1550 	}
1551  fail_1:
1552 	for (i = 0; i < ENET_TX_RING_CNT; i++) {
1553 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1554 			bus_dmamap_destroy(sc->sc_dmat,
1555 			    sc->sc_txsoft[i].txs_dmamap);
1556 	}
1557 	return error;
1558 }
1559 
1560 static int
1561 enet_encap_mbufalign(struct mbuf **mp)
1562 {
1563 	struct mbuf *m, *m0, *mt, *p, *x;
1564 	void *ap;
1565 	uint32_t alignoff, chiplen;
1566 
1567 	/*
1568 	 * the i.MX6 SoC ethernet controller requires that the buffer
1569 	 * address be 8-byte aligned and that the buffer length be
1570 	 * greater than 10 bytes (first fragment only?)
1571 	 */
1572 #define ALIGNBYTE	8
1573 #define MINBUFSIZE	10
1574 #define ALIGN_PTR(p, align)	\
1575 	(void *)(((uintptr_t)(p) + ((align) - 1)) & -(align))
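
/*
 * ALIGN_PTR rounds a pointer up to the next 'align' boundary, e.g.
 * ALIGN_PTR(0x1003, 8) == 0x1008 and ALIGN_PTR(0x1008, 8) == 0x1008.
 */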
1576 
1577 	m0 = *mp;
1578 	mt = p = NULL;
1579 	for (m = m0; m != NULL; m = m->m_next) {
1580 		alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1);
1581 		if (m->m_len < (ALIGNBYTE * 2)) {
1582 			/*
1583 			 * rearrange the mbuf data so it is aligned
1584 			 *
1585 			 *        align 8 *       *       *       *       *
1586 			 *               +0123456789abcdef0123456789abcdef0
1587 			 * FROM m->m_data[___________abcdefghijklmn_______]
1588 			 *
1589 			 *               +0123456789abcdef0123456789abcdef0
1590 			 * TO   m->m_data[________abcdefghijklm___________] or
1591 			 *      m->m_data[________________abcdefghijklmn__]
1592 			 */
1593 			if ((alignoff != 0) && (m->m_len != 0)) {
1594 				chiplen = ALIGNBYTE - alignoff;
1595 				if (M_LEADINGSPACE(m) >= alignoff) {
1596 					ap = m->m_data - alignoff;
1597 					memmove(ap, m->m_data, m->m_len);
1598 					m->m_data = ap;
1599 				} else if (M_TRAILINGSPACE(m) >= chiplen) {
1600 					ap = m->m_data + chiplen;
1601 					memmove(ap, m->m_data, m->m_len);
1602 					m->m_data = ap;
1603 				} else {
1604 					/*
1605 					 * no space to align data (M_READONLY?);
1606 					 * allocate a new aligned mbuf
1607 					 * and copy the data into it.
1608 					 */
1609 					MGET(x, M_DONTWAIT, m->m_type);
1610 					if (x == NULL) {
1611 						m_freem(m);
1612 						return ENOBUFS;
1613 					}
1614 					MCLAIM(x, m->m_owner);
1615 					if (m->m_flags & M_PKTHDR)
1616 						M_MOVE_PKTHDR(x, m);
1617 					x->m_len = m->m_len;
1618 					x->m_data = ALIGN_PTR(x->m_data,
1619 					    ALIGNBYTE);
1620 					memcpy(mtod(x, void *), mtod(m, void *),
1621 					    m->m_len);
1622 					p->m_next = x;
1623 					MFREE(m, x->m_next);
1624 					m = x;
1625 				}
1626 			}
1627 
1628 			/*
1629 			 * fill the 1st mbuf with at least 10 bytes
1630 			 *
1631 			 *        align 8 *       *       *       *       *
1632 			 *               +0123456789abcdef0123456789abcdef0
1633 			 * FROM m->m_data[________abcde___________________]
1634 			 *      m->m_data[__fg____________________________]
1635 			 *      m->m_data[_________________hi_____________]
1636 			 *      m->m_data[__________jk____________________]
1637 			 *      m->m_data[____l___________________________]
1638 			 *
1639 			 *               +0123456789abcdef0123456789abcdef0
1640 			 * TO   m->m_data[________abcdefghij______________]
1641 			 *      m->m_data[________________________________]
1642 			 *      m->m_data[________________________________]
1643 			 *      m->m_data[___________k____________________]
1644 			 *      m->m_data[____l___________________________]
1645 			 */
1646 			if (mt == NULL) {
1647 				mt = m;
1648 				while (mt->m_len == 0) {
1649 					mt = mt->m_next;
1650 					if (mt == NULL) {
1651 						m_freem(m);
1652 						return ENOBUFS;
1653 					}
1654 				}
1655 
1656 				/* mt = 1st mbuf, x = 2nd mbuf */
1657 				x = mt->m_next;
1658 				while (mt->m_len < MINBUFSIZE) {
1659 					if (x == NULL) {
1660 						m_freem(m);
1661 						return ENOBUFS;
1662 					}
1663 
1664 					alignoff = (uintptr_t)x->m_data &
1665 					    (ALIGNBYTE - 1);
1666 					chiplen = ALIGNBYTE - alignoff;
1667 					if (chiplen > x->m_len) {
1668 						chiplen = x->m_len;
1669 					} else if ((mt->m_len + chiplen) <
1670 					    MINBUFSIZE) {
1671 						/*
1672 						 * next mbuf should be greater
1673 						 * than ALIGNBYTE?
1674 						 */
1675 						if (x->m_len >= (chiplen +
1676 						    ALIGNBYTE * 2))
1677 							chiplen += ALIGNBYTE;
1678 						else
1679 							chiplen = x->m_len;
1680 					}
1681 
1682 					if (chiplen &&
1683 					    (M_TRAILINGSPACE(mt) < chiplen)) {
1684 						/*
1685 						 * move data to the beginning
1686 						 * of m_dat[] (aligned) to
1687 						 * enlarge the trailing space
1688 						 */
1689 						if (mt->m_flags & M_EXT) {
1690 							ap = mt->m_ext.ext_buf;
1691 						} else if (mt->m_flags &
1692 						    M_PKTHDR) {
1693 							ap = mt->m_pktdat;
1694 						} else {
1695 							ap = mt->m_dat;
1696 						}
1697 						ap = ALIGN_PTR(ap, ALIGNBYTE);
1698 						memcpy(ap, mt->m_data, mt->m_len);
1699 						mt->m_data = ap;
1700 					}
1701 
1702 					if (chiplen &&
1703 					    (M_TRAILINGSPACE(mt) >= chiplen)) {
1704 						memcpy(mt->m_data + mt->m_len,
1705 						    x->m_data, chiplen);
1706 						mt->m_len += chiplen;
1707 						m_adj(x, chiplen);
1708 					}
1709 
1710 					x = x->m_next;
1711 				}
1712 			}
1713 
1714 		} else {
1715 			mt = m;
1716 
1717 			/*
1718 			 * allocate new mbuf x, and rearrange as below;
1719 			 *
1720 			 *        align 8 *       *       *       *       *
1721 			 *               +0123456789abcdef0123456789abcdef0
1722 			 * FROM m->m_data[____________abcdefghijklmnopq___]
1723 			 *
1724 			 *               +0123456789abcdef0123456789abcdef0
1725 			 * TO   x->m_data[________abcdefghijkl____________]
1726 			 *      m->m_data[________________________mnopq___]
1727 			 *
1728 			 */
1729 			if (alignoff != 0) {
1730 				/* at least ALIGNBYTE */
1731 				chiplen = ALIGNBYTE - alignoff + ALIGNBYTE;
1732 
1733 				MGET(x, M_DONTWAIT, m->m_type);
1734 				if (x == NULL) {
1735 					m_freem(m);
1736 					return ENOBUFS;
1737 				}
1738 				MCLAIM(x, m->m_owner);
1739 				if (m->m_flags & M_PKTHDR)
1740 					M_MOVE_PKTHDR(x, m);
1741 				x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE);
1742 				memcpy(mtod(x, void *), mtod(m, void *),
1743 				    chiplen);
1744 				x->m_len = chiplen;
1745 				x->m_next = m;
1746 				m_adj(m, chiplen);
1747 
1748 				if (p == NULL)
1749 					m0 = x;
1750 				else
1751 					p->m_next = x;
1752 			}
1753 		}
1754 		p = m;
1755 	}
1756 	*mp = m0;
1757 
1758 	return 0;
1759 }
1760 
1761 static int
1762 enet_encap_txring(struct enet_softc *sc, struct mbuf **mp)
1763 {
1764 	bus_dmamap_t map;
1765 	struct mbuf *m;
1766 	int csumflags, idx, i, error;
1767 	uint32_t flags1, flags2;
1768 
1769 	idx = sc->sc_tx_prodidx;
1770 	map = sc->sc_txsoft[idx].txs_dmamap;
1771 
1772 	/* align mbuf data to satisfy the ENET constraints */
1773 	error = enet_encap_mbufalign(mp);
1774 	if (error != 0)
1775 		return error;
1776 
1777 	m = *mp;
1778 	csumflags = m->m_pkthdr.csum_flags;
1779 
1780 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1781 	    BUS_DMA_NOWAIT);
1782 	if (error != 0) {
1783 		device_printf(sc->sc_dev,
1784 		    "Error mapping mbuf into TX chain: error=%d\n", error);
1785 		m_freem(m);
1786 		return error;
1787 	}
1788 
1789 	if (map->dm_nsegs > sc->sc_tx_free) {
1790 		bus_dmamap_unload(sc->sc_dmat, map);
1791 		device_printf(sc->sc_dev,
1792 		    "mbuf chain has too many segments: %d\n", map->dm_nsegs);
1793 		m_freem(m);
1794 		return ENOBUFS;
1795 	}
1796 
1797 	/* zero the protocol checksum field beforehand */
1798 	if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 |
1799 	    M_CSUM_UDPv6 | M_CSUM_TCPv6)) {
1800 		struct mbuf *m1;
1801 		int ehlen, moff;
1802 		uint16_t etype;
1803 
1804 		m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype);
1805 		switch (ntohs(etype)) {
1806 		case ETHERTYPE_IP:
1807 		case ETHERTYPE_IPV6:
1808 			ehlen = ETHER_HDR_LEN;
1809 			break;
1810 		case ETHERTYPE_VLAN:
1811 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1812 			break;
1813 		default:
1814 			ehlen = 0;
1815 			break;
1816 		}
1817 
1818 		if (ehlen) {
1819 			m1 = m_getptr(m, ehlen +
1820 			    M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) +
1821 			    M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data),
1822 			    &moff);
1823 			if (m1 != NULL)
1824 				*(uint16_t *)(mtod(m1, char *) + moff) = 0;
1825 		}
1826 	}
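	/*
	 * The m_getptr() offset above lands on the L4 checksum field; for
	 * example, TCP over IPv4 with a 20-byte IP header gives
	 * 14 (ether) + 20 (IP) + 16 (offset of th_sum) = 50 bytes.
	 */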
1827 
1828 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1829 	    BUS_DMASYNC_PREWRITE);
1830 
1831 	for (i = 0; i < map->dm_nsegs; i++) {
1832 		flags1 = TXFLAGS1_R;
1833 		flags2 = 0;
1834 
1835 		if (i == 0) {
1836 			flags1 |= TXFLAGS1_T1;	/* mark as first segment */
1837 			sc->sc_txsoft[idx].txs_mbuf = m;
1838 		}
1839 
1840 		/* checksum offloading */
1841 		if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 |
1842 		    M_CSUM_UDPv6 | M_CSUM_TCPv6))
1843 			flags2 |= TXFLAGS2_PINS;
1844 		if (csumflags & (M_CSUM_IPv4))
1845 			flags2 |= TXFLAGS2_IINS;
1846 
1847 		if (i == map->dm_nsegs - 1) {
1848 			/* mark last segment */
1849 			flags1 |= TXFLAGS1_L | TXFLAGS1_TC;
1850 			flags2 |= TXFLAGS2_INT;
1851 		}
1852 		if (idx == ENET_TX_RING_CNT - 1) {
1853 			/* mark end of ring */
1854 			flags1 |= TXFLAGS1_W;
1855 		}
1856 
1857 		sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr;
1858 		sc->sc_txdesc_ring[idx].tx_flags2 = flags2;
1859 		sc->sc_txdesc_ring[idx].tx_flags3 = 0;
1860 		sc->sc_txdesc_ring[idx].tx_flags1_len =
1861 		    flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len);
1862 
1863 		TXDESC_WRITEOUT(idx);
1864 
1865 		idx = ENET_TX_NEXTIDX(idx);
1866 		sc->sc_tx_free--;
1867 	}
1868 
1869 	sc->sc_tx_prodidx = idx;
1870 
1871 	return 0;
1872 }
1873 
1874 /*
1875  * device initialize
1876  */
1877 static int
1878 enet_init_plls(struct enet_softc *sc)
1879 {
1880 #if NIMXCCM > 0
1881 	/* PLL power up */
1882 	if (imx6_pll_power(CCM_ANALOG_PLL_ENET, 1) != 0) {
1883 		aprint_error_dev(sc->sc_dev,
1884 		    "couldn't enable CCM_ANALOG_PLL_ENET\n");
1885 		return -1;
1886 	}
1887 #endif
1888 
1889 	return 0;
1890 }
1891 
1892 static int
1893 enet_init_regs(struct enet_softc *sc, int init)
1894 {
1895 	struct mii_data *mii;
1896 	struct ifmedia_entry *ife;
1897 	paddr_t paddr;
1898 	uint32_t val;
1899 	int fulldup, ecr_speed, rcr_speed, flowctrl;
1900 
1901 	if (init) {
1902 		fulldup = 1;
1903 		ecr_speed = ENET_ECR_SPEED;
1904 		rcr_speed = 0;
1905 		flowctrl = 0;
1906 	} else {
1907 		mii = &sc->sc_mii;
1908 		ife = mii->mii_media.ifm_cur;
1909 
1910 		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
1911 			fulldup = 1;
1912 		else
1913 			fulldup = 0;
1914 
1915 		switch (IFM_SUBTYPE(ife->ifm_media)) {
1916 		case IFM_10_T:
1917 			ecr_speed = 0;
1918 			rcr_speed = ENET_RCR_RMII_10T;
1919 			break;
1920 		case IFM_100_TX:
1921 			ecr_speed = 0;
1922 			rcr_speed = 0;
1923 			break;
1924 		default:
1925 			ecr_speed = ENET_ECR_SPEED;
1926 			rcr_speed = 0;
1927 			break;
1928 		}
1929 
1930 		flowctrl = sc->sc_flowflags & IFM_FLOW;
1931 	}
1932 
1933 	/* reset */
1934 	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);
1935 
1936 	/* mask and clear all interrupts */
1937 	ENET_REG_WRITE(sc, ENET_EIMR, 0);
1938 	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);
1939 
1940 	/* full duplex */
1941 	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);
1942 
1943 	/* clear and enable MIB register */
1944 	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
1945 	ENET_REG_WRITE(sc, ENET_MIBC, 0);
1946 
1947 	/* MII speed setup. MDCclk(=2.5MHz) = PLL6clk/((val+1)*2) */
1948 	val = (imx6_get_clock(IMX6CLK_PLL6) / 500000 - 1) / 10;
1949 	ENET_REG_WRITE(sc, ENET_MSCR, val);
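	/*
	 * For example, with a 500MHz PLL6 clock this gives
	 * val = (1000 - 1) / 10 = 99, so MDC = 500MHz / ((99 + 1) * 2)
	 * = 2.5MHz.
	 */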
1950 
1951 	/* Opcode/Pause Duration */
1952 	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);
1953 
1954 	/* Receive FIFO */
1955 	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
1956 	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
1957 	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
1958 	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */
1959 
1960 	/* Transmit FIFO */
1961 	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
1962 	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
1963 	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
1964 	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
1965 	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
1966 	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */
1967 
1968 	/* hardware checksum is off by default (overridden per TX descriptor) */
1969 	ENET_REG_WRITE(sc, ENET_TACC, 0);
1970 
1971 	/*
1972 	 * align the ethernet payload on 32 bits, discard frames with
1973 	 * MAC-layer errors, and don't discard on checksum errors
1974 	 */
1975 	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);
1976 
1977 	/* maximum frame size */
1978 	val = ENET_DEFAULT_PKT_LEN;
1979 	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */
1980 	ENET_REG_WRITE(sc, ENET_RCR,
1981 	    ENET_RCR_PADEN |			/* RX frame padding remove */
1982 	    ENET_RCR_RGMII_EN |			/* use RGMII */
1983 	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
1984 	    rcr_speed |
1985 	    (fulldup ? 0 : ENET_RCR_DRT) |
1986 	    ENET_RCR_MAX_FL(val));
1987 
1988 	/* Maximum Receive BufSize per one descriptor */
1989 	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);
1990 
1991 
1992 	/* TX/RX Descriptor Physical Address */
1993 	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
1994 	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
1995 	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
1996 	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
1997 	/* sync cache */
1998 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
1999 	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2000 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
2001 	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2002 
2003 	/* enable interrupts */
2004 	ENET_REG_WRITE(sc, ENET_EIMR,
2005 	    ENET_EIR_TXF |
2006 	    ENET_EIR_RXF |
2007 	    ENET_EIR_MII |
2008 	    ENET_EIR_EBERR |
2009 	    0);
2010 
2011 	/* enable ether */
2012 	ENET_REG_WRITE(sc, ENET_ECR,
2013 #if _BYTE_ORDER == _LITTLE_ENDIAN
2014 	    ENET_ECR_DBSWP |
2015 #endif
2016 	    ENET_ECR_SPEED |	/* default 1000Mbps mode */
2017 	    ENET_ECR_EN1588 |	/* use enhanced TX/RX descriptor */
2018 	    ENET_ECR_ETHEREN);	/* Ethernet Enable */
2019 
2020 	return 0;
2021 }
2022 
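/*
 * enet_alloc_dma() follows the usual bus_dma(9) sequence: allocate
 * segments, map them into kernel virtual address space, create a map,
 * and load it, unwinding in reverse order on failure.
 */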
2023 static int
2024 enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp,
2025               bus_dmamap_t *mapp)
2026 {
2027 	bus_dma_segment_t seglist[1];
2028 	int nsegs, error;
2029 
2030 	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist,
2031 	    1, &nsegs, BUS_DMA_WAITOK)) != 0) {
2032 		device_printf(sc->sc_dev,
2033 		    "unable to allocate DMA buffer, error=%d\n", error);
2034 		goto fail_alloc;
2035 	}
2036 
2037 	if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp,
2038 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
2039 		device_printf(sc->sc_dev,
2040 		    "unable to map DMA buffer, error=%d\n",
2041 		    error);
2042 		goto fail_map;
2043 	}
2044 
2045 	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2046 	    BUS_DMA_NOWAIT, mapp)) != 0) {
2047 		device_printf(sc->sc_dev,
2048 		    "unable to create DMA map, error=%d\n", error);
2049 		goto fail_create;
2050 	}
2051 
2052 	if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL,
2053 	    BUS_DMA_NOWAIT)) != 0) {
2054 		aprint_error_dev(sc->sc_dev,
2055 		    "unable to load DMA map, error=%d\n", error);
2056 		goto fail_load;
2057 	}
2058 
2059 	return 0;
2060 
2061  fail_load:
2062 	bus_dmamap_destroy(sc->sc_dmat, *mapp);
2063  fail_create:
2064 	bus_dmamem_unmap(sc->sc_dmat, *addrp, size);
2065  fail_map:
2066 	bus_dmamem_free(sc->sc_dmat, seglist, 1);
2067  fail_alloc:
2068 	return error;
2069 }
2070