/* $NetBSD: dwc_gmac.c,v 1.32 2015/02/23 19:05:17 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.32 2015/02/23 19:05:17 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t x);

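/*
 * The RX and TX descriptor rings share a single DMA allocation: the
 * RX ring comes first, followed by the TX ring, which is why
 * TX_DESC_OFFSET() skips over AWGE_RX_RING_COUNT descriptors.  The
 * *_NEXT() macros assume both ring sizes are powers of two.
 */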
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|	\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)
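
/*
 * Note that NIE and AIE are the DMA engine's normal/abnormal
 * interrupt summary enables; the individual cause bits above only
 * raise the interrupt line while the matching summary bit is set.
 */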

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/*
			 * Fake a MAC address: 0x00f2 in the low byte
			 * keeps the multicast bit clear and sets the
			 * locally administered bit.
			 */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

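/*
 * Soft-reset the core: set the reset bit in the DMA bus mode register
 * and poll until the hardware clears it again, giving up after about
 * 30ms (3000 polls of 10us each).
 */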
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

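/*
 * Load the station address into the MAC address-0 filter registers:
 * bytes 0-3 go into ADDR0LO (byte 0 in the least significant byte),
 * bytes 4 and 5 into the low half of ADDR0HI.
 */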
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

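/*
 * MDIO access: program the PHY number, register and clock divider
 * into the MII address register with the BUSY bit set, then poll
 * (for up to 10ms) until the hardware clears BUSY again before
 * touching the MII data register.
 */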
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

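/*
 * Allocate one contiguous DMA area holding all RX and TX descriptors
 * and carve it up: the first AWGE_RX_RING_COUNT descriptors form the
 * RX ring, the remainder the TX ring.  This layout has to match the
 * RX_DESC_OFFSET()/TX_DESC_OFFSET() macros above.
 */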
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

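/*
 * Sync the TX descriptors in the half-open range [start, end); if
 * the range wraps around the end of the ring, this is done with two
 * separate bus_dmamap_sync() calls.
 */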
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
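
	/*
	 * Build the descriptor chain segment by segment.  All
	 * descriptors except the first are marked as owned by the
	 * device right away; the first one is flipped to OWNEDBYDEV
	 * only after the whole chain has been set up, so the DMA
	 * engine never sees a partially built chain.
	 */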
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if ((cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}

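/*
 * Reclaim transmitted packets: walk the TX ring from t_next, stop at
 * the first descriptor still owned by the hardware, and free the
 * mbufs and unload the DMA maps of all completed packets.
 */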
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 need not be a valid descriptor; it is only used
		 * to mark the end of the sync range, so this syncs
		 * just the single tx descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
}

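/*
 * Harvest received packets: walk the RX ring from r_cur until we hit
 * a descriptor still owned by the device.  Each good frame is passed
 * up the stack with a freshly allocated cluster put in its place;
 * afterwards the descriptor is handed back to the hardware.
 */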
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

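/*
 * Program the 64-bit multicast hash filter: each address is hashed
 * by taking the upper 6 bits of the bit-reversed complement of its
 * little-endian CRC32, selecting one bit in the HTHIGH:HTLOW table.
 * Address ranges fall back to all-multicast, IFF_PROMISC to
 * promiscuous mode.
 */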
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif