/* $NetBSD: dwc_gmac.c,v 1.69 2020/01/29 14:14:55 thorpej Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product brief can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.69 2020/01/29 14:14:55 thorpej Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t	bitrev32(uint32_t);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};

#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
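
/*
 * Worked example of the ring arithmetic above (the values here are
 * illustrative; the real counts come from dwc_gmac_var.h): the RX and
 * TX descriptors live back-to-back in one DMA allocation, RX first.
 * With AWGE_RX_RING_COUNT = 32 and 16-byte descriptors,
 *
 *	RX_DESC_OFFSET(0) == 0,   RX_DESC_OFFSET(1) == 16
 *	TX_DESC_OFFSET(0) == 32 * 16 == 512
 *
 * The *_NEXT() macros wrap by masking, which only works because the
 * ring counts are powers of two: TX_NEXT(31) == (31+1) & 31 == 0.
 */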

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE |	\
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

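	/*
	 * The DMA hardware-feature register only exists on newer cores
	 * (version register 0x35 and up, presumably core revision 3.50);
	 * on older ones we fall back to the standard descriptor layout.
	 */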
	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready; attach the interface.
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
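	/* Poll for reset completion: 3000 * 10us gives ~30ms worst case. */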
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

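/*
 * The MAC address registers take the address in little-endian byte
 * order: ADDR0LO holds bytes 0-3, ADDR0HI bytes 4-5.  For example,
 * 00:11:22:33:44:55 is written as lo = 0x33221100, hi = 0x00005544.
 */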
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

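	/* Wait for the MDIO transaction to finish: up to 1000 * 10us = 10ms. */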
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining descriptors to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

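/*
 * Sync TX descriptors [start, end) with the given bus_dmamap_sync ops,
 * taking care of ring wrap-around.  For example, with a 32-entry ring,
 * start = 30 and end = 2 is split into two syncs, [30, 32) and [0, 2).
 */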
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
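 *
 * The chain is loaded into a multi-segment DMA map, one descriptor per
 * segment.  Ownership of the first descriptor is handed to the device
 * last, so the DMA engine never sees a partially built chain.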
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 need not be a valid descriptor index; it is
		 * just a convenient way to sync the single TX
		 * descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
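/*
 * For example, bitrev32(0x00000001) == 0x80000000 and
 * bitrev32(0xff000000) == 0x000000ff.  The multicast filter below
 * relies on this to put the CRC bits into the order the GMAC's
 * hash-table registers expect.
 */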
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

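		/*
		 * Hash filtering: take the bit-reversed CRC32 of the
		 * address and use its top 6 bits as an index into the
		 * 64-bit hash table (bit 5 selects HTHIGH vs HTLOW).
		 */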
		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	     AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif
1741