/* $NetBSD: dwc_gmac.c,v 1.37 2016/12/15 09:28:05 ozaki-r Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation seems not to be available; the marketing product
 * documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.37 2016/12/15 09:28:05 ozaki-r Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);

#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
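
/*
 * All descriptors live in a single DMA area; the first
 * AWGE_RX_RING_COUNT of them form the RX ring and the TX ring follows
 * directly after it, which is why TX_DESC_OFFSET() skips the RX part.
 * The *_NEXT() macros wrap around using a mask and therefore rely on
 * the ring counts being powers of two.
 */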

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
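
/*
 * GMAC_DEF_DMA_INT_MASK is the set of DMA interrupts enabled at attach
 * time (the normal/abnormal summary bits plus TX/RX completion, fatal
 * bus error and TX underflow); GMAC_DMA_INT_ERRORS collects the
 * conditions that dwc_gmac_intr() accounts as errors.
 */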

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
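
/*
 * Issue a software reset: set the reset bit in the DMA bus mode
 * register and busy-wait for the device to clear it again, giving up
 * after roughly 30ms (3000 polls, 10us apart).
 */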
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}
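
/*
 * MII accesses go through the MIIADDR register: deposit the PHY,
 * register and clock selection together with GMAC_MII_BUSY (for
 * writes, also GMAC_MII_WRITE, with the value placed in MIIDATA
 * first), then poll until the device clears the busy bit.
 * sc_mdio_lock serializes access to the single MDIO interface; a read
 * that times out simply returns 0.
 */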
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}
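
/*
 * The RX ring is used in chained mode (DDESC_CNTL_RXCHAIN): every
 * descriptor points to its successor and is backed by one mbuf
 * cluster, and setting DDESC_STATUS_OWNEDBYDEV hands it to the
 * device.
 */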
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}
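
/*
 * Sync the TX descriptors in [start, end) for the given ops.  The
 * region may wrap past the end of the ring, in which case it is
 * synced in two pieces; e.g. start == AWGE_TX_RING_COUNT-2 and
 * end == 1 syncs the last two descriptors and then the first one.
 */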
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
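/*
 * One TX descriptor is consumed per DMA segment; the first one gets
 * DDESC_CNTL_TXFIRST and the last one DDESC_CNTL_TXLAST|TXINT.  All
 * but the first descriptor are handed to the device while the chain
 * is being built; ownership of the first one is only passed at the
 * very end, so the device never sees a partially constructed chain.
 */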
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}
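
/*
 * Reclaim completed TX descriptors: walk the ring from t_next until
 * we find a descriptor still owned by the device, unloading and
 * freeing the transmitted mbufs and returning their descriptors by
 * decrementing t_queued.
 */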
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor;
		 * this is just a convention to sync only the
		 * single tx descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
}
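
/*
 * Harvest received packets: for each descriptor the device has handed
 * back, try to substitute a freshly allocated mbuf cluster before
 * passing the packet up; if no replacement is available, drop the
 * packet and reuse the old buffer.  In either case the descriptor is
 * re-armed with DDESC_STATUS_OWNEDBYDEV.
 */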
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(ifp->if_percpuq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}
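
/*
 * The hash filter apparently indexes a 64 bit table with the upper
 * 6 bits of the bit-reversed CRC32 of the multicast address: bit
 * (h & 0x1f) of hash word (h >> 5), i.e. the HTLOW/HTHIGH registers.
 */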
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}
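
/*
 * Interrupt handler: an MII interrupt just triggers a PHY status
 * poll; the DMA status register dispatches to the TX and RX handlers
 * and flags the error conditions, and the pending DMA status bits are
 * written back to acknowledge them before trying to send more
 * packets.
 */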
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif