1 /* $NetBSD: dwc_gmac.c,v 1.86 2024/03/14 16:43:00 jakllsch Exp $ */
2 
3 /*-
4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * This driver supports the Synopsys DesignWare GMAC core, as found
34  * on Allwinner A20 cores and others.
35  *
36  * Real documentation does not seem to be available; the marketing
37  * product documents can be found here:
38  *
39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
40  */
41 
42 #include <sys/cdefs.h>
43 
44 __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.86 2024/03/14 16:43:00 jakllsch Exp $");
45 
46 /* #define	DWC_GMAC_DEBUG	1 */
47 
48 #ifdef _KERNEL_OPT
49 #include "opt_inet.h"
50 #endif
51 
52 #include <sys/param.h>
53 #include <sys/bus.h>
54 #include <sys/device.h>
55 #include <sys/intr.h>
56 #include <sys/systm.h>
57 #include <sys/sockio.h>
58 #include <sys/cprng.h>
59 #include <sys/rndsource.h>
60 
61 #include <net/if.h>
62 #include <net/if_ether.h>
63 #include <net/if_media.h>
64 #include <net/bpf.h>
65 #ifdef INET
66 #include <netinet/if_inarp.h>
67 #endif
68 
69 #include <dev/mii/miivar.h>
70 
71 #include <dev/ic/dwc_gmac_reg.h>
72 #include <dev/ic/dwc_gmac_var.h>
73 
74 static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
75 static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
76 static void dwc_gmac_miibus_statchg(struct ifnet *);
77 
78 static int dwc_gmac_reset(struct dwc_gmac_softc *);
79 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
80 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
81 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
82 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
83 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
84 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
85 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
86 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
87 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
88 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
89 static int dwc_gmac_init(struct ifnet *);
90 static int dwc_gmac_init_locked(struct ifnet *);
91 static void dwc_gmac_stop(struct ifnet *, int);
92 static void dwc_gmac_stop_locked(struct ifnet *, int);
93 static void dwc_gmac_start(struct ifnet *);
94 static void dwc_gmac_start_locked(struct ifnet *);
95 static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
96 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
97 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
98 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
99 static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
100 static int dwc_gmac_ifflags_cb(struct ethercom *);
101 static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
102 static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
103 static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
104 static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
105 static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
106 static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
107 static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
108 static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
109 static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
110 static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
111 static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
112 static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
113 static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
114 static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
115 static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
116 static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
117 
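/*
 * The core uses one of two DMA descriptor layouts; dwc_gmac_attach()
 * picks the matching method table below based on the
 * GMAC_DMA_FEAT_ENHANCED_DESC bit in the DMA hardware feature register.
 */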
118 static const struct dwc_gmac_desc_methods desc_methods_standard = {
119 	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
120 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
121 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
122 	.tx_set_len = dwc_gmac_desc_std_set_len,
123 	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
124 	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
125 	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
126 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
127 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
128 	.rx_set_len = dwc_gmac_desc_std_set_len,
129 	.rx_get_len = dwc_gmac_desc_std_get_len,
130 	.rx_has_error = dwc_gmac_desc_std_rx_has_error
131 };
132 
133 static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
134 	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
135 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
136 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
137 	.tx_set_len = dwc_gmac_desc_enh_set_len,
138 	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
139 	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
140 	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
141 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
142 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
143 	.rx_set_len = dwc_gmac_desc_enh_set_len,
144 	.rx_get_len = dwc_gmac_desc_enh_get_len,
145 	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
146 };
147 
148 
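/*
 * RX and TX descriptors share a single DMA allocation: the
 * AWGE_RX_RING_COUNT RX descriptors come first, followed by the TX
 * descriptors (see dwc_gmac_alloc_dma_rings()).  The *_NEXT() macros
 * assume both ring sizes are powers of two.
 */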
149 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
150 				    * sizeof(struct dwc_gmac_dev_dmadesc))
151 #define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))
152 
153 #define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
154 #define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
155 
156 
157 
158 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
159 				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
160 				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
161 
162 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
163 				GMAC_DMA_INT_FBE |	\
164 				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
165 				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
166 				GMAC_DMA_INT_TJE)
167 
168 #define	AWIN_DEF_MAC_INTRMASK	\
169 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
170 	AWIN_GMAC_MAC_INT_LINKCHG)
171 
172 #ifdef DWC_GMAC_DEBUG
173 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
174 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
175 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
176 static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
177 static void dwc_dump_status(struct dwc_gmac_softc *);
178 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
179 #endif
180 
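/*
 * Bus glue is expected to have filled in sc_dev, sc_bst, sc_bsh and
 * sc_dmat before calling this, and to register dwc_gmac_intr() as the
 * interrupt handler itself.
 */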
181 int
182 dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
183 {
184 	uint8_t enaddr[ETHER_ADDR_LEN];
185 	uint32_t maclo, machi, ver, hwft;
186 	struct mii_data * const mii = &sc->sc_mii;
187 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
188 	prop_dictionary_t dict;
189 
190 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
191 	sc->sc_mii_clk = mii_clk & 7;
192 
193 	dict = device_properties(sc->sc_dev);
194 	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
195 	if (ea != NULL) {
196 		/*
197 		 * If the MAC address is overridden by a device property,
198 		 * use that.
199 		 */
200 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
201 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
202 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
203 	} else {
204 		/*
205 		 * If we did not get an externally configured address,
206 		 * try to read one from the current filter setup,
207 		 * before resetting the chip.
208 		 */
209 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
210 		    AWIN_GMAC_MAC_ADDR0LO);
211 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
212 		    AWIN_GMAC_MAC_ADDR0HI);
213 
214 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
215 			/* fake a MAC address; 0xf2 sets the locally administered bit */
216 			maclo = 0x00f2 | (cprng_strong32() << 16);
217 			machi = cprng_strong32();
218 		}
219 
220 		enaddr[0] = maclo & 0x0ff;
221 		enaddr[1] = (maclo >> 8) & 0x0ff;
222 		enaddr[2] = (maclo >> 16) & 0x0ff;
223 		enaddr[3] = (maclo >> 24) & 0x0ff;
224 		enaddr[4] = machi & 0x0ff;
225 		enaddr[5] = (machi >> 8) & 0x0ff;
226 	}
227 
228 	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
229 	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);
230 
231 	/*
232 	 * Init chip and do initial setup
233 	 */
234 	if (dwc_gmac_reset(sc) != 0)
235 		return ENXIO;	/* not much to cleanup, haven't attached yet */
236 	dwc_gmac_write_hwaddr(sc, enaddr);
237 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
238 	    ether_sprintf(enaddr));
239 
240 	hwft = 0;
241 	if (ver >= 0x35) {
242 		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
243 		    AWIN_GMAC_DMA_HWFEATURES);
244 		aprint_normal_dev(sc->sc_dev,
245 		    "HW feature mask: %x\n", hwft);
246 	}
247 
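	/*
	 * The core only emits 32-bit DMA addresses, so restrict the
	 * DMA tag to the low 4 GB on platforms with a wider bus_addr_t.
	 */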
248 	if (sizeof(bus_addr_t) > 4) {
249 		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
250 		    &sc->sc_dmat, BUS_DMA_WAITOK);
251 		if (error != 0) {
252 			aprint_error_dev(sc->sc_dev,
253 			    "failed to create DMA subregion\n");
254 			return ENOMEM;
255 		}
256 	}
257 
258 	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
259 		aprint_normal_dev(sc->sc_dev,
260 		    "Using enhanced descriptor format\n");
261 		sc->sc_descm = &desc_methods_enhanced;
262 	} else {
263 		sc->sc_descm = &desc_methods_standard;
264 	}
265 	if (hwft & GMAC_DMA_FEAT_RMON) {
266 		uint32_t val;
267 
268 		/* Mask all MMC interrupts */
269 		val = 0xffffffff;
270 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
271 		    GMAC_MMC_RX_INT_MSK, val);
272 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
273 		    GMAC_MMC_TX_INT_MSK, val);
274 	}
275 
276 	/*
277 	 * Allocate Tx and Rx rings
278 	 */
279 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
280 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
281 		goto fail;
282 	}
283 
284 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
285 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
286 		goto fail;
287 	}
288 
289 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
290 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
291 		goto fail;
292 	}
293 
294 	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
295 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
296 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
297 
298 	/*
299 	 * Prepare interface data
300 	 */
301 	ifp->if_softc = sc;
302 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
303 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
304 #ifdef DWCGMAC_MPSAFE
305 	ifp->if_extflags = IFEF_MPSAFE;
306 #endif
307 	ifp->if_ioctl = dwc_gmac_ioctl;
308 	ifp->if_start = dwc_gmac_start;
309 	ifp->if_init = dwc_gmac_init;
310 	ifp->if_stop = dwc_gmac_stop;
311 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
312 	IFQ_SET_READY(&ifp->if_snd);
313 
314 	/*
315 	 * Attach MII subdevices
316 	 */
317 	sc->sc_ec.ec_mii = &sc->sc_mii;
318 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
319 	mii->mii_ifp = ifp;
320 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
321 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
322 	mii->mii_statchg = dwc_gmac_miibus_statchg;
323 	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
324 	    MIIF_DOPAUSE);
325 
326 	if (LIST_EMPTY(&mii->mii_phys)) {
327 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
328 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
329 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
330 	} else {
331 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
332 	}
333 
334 	/*
335 	 * We can support 802.1Q VLAN-sized frames.
336 	 */
337 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
338 
339 	/*
340 	 * Ready, attach interface
341 	 */
343 	if_initialize(ifp);
344 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
345 	if_deferred_start_init(ifp, NULL);
346 	ether_ifattach(ifp, enaddr);
347 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
348 	if_register(ifp);
349 	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
350 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
351 
352 	/*
353 	 * Enable interrupts
354 	 */
355 	mutex_enter(sc->sc_lock);
356 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
357 	    AWIN_DEF_MAC_INTRMASK);
358 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
359 	    GMAC_DEF_DMA_INT_MASK);
360 	mutex_exit(sc->sc_lock);
361 
362 	return 0;
363 
364 fail:
365 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
366 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
367 	dwc_gmac_free_dma_rings(sc);
368 	mutex_destroy(&sc->sc_mdio_lock);
369 
370 	return ENXIO;
371 }
372 
373 
374 
375 static int
376 dwc_gmac_reset(struct dwc_gmac_softc *sc)
377 {
378 	size_t cnt;
379 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
380 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
381 	    | GMAC_BUSMODE_RESET);
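	/* Poll for the reset bit to self-clear: 30000 * 10us is ~300ms. */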
382 	for (cnt = 0; cnt < 30000; cnt++) {
383 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
384 		    & GMAC_BUSMODE_RESET) == 0)
385 			return 0;
386 		delay(10);
387 	}
388 
389 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
390 	return EIO;
391 }
392 
393 static void
394 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
395     uint8_t enaddr[ETHER_ADDR_LEN])
396 {
397 	uint32_t hi, lo;
398 
399 	hi = enaddr[4] | (enaddr[5] << 8);
400 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
401 	    | ((uint32_t)enaddr[3] << 24);
402 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
403 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
404 }
405 
406 static int
407 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
408 {
409 	struct dwc_gmac_softc * const sc = device_private(self);
410 	uint16_t mii;
411 	size_t cnt;
412 
413 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
414 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
415 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
416 	    | GMAC_MII_BUSY;
417 
418 	mutex_enter(&sc->sc_mdio_lock);
419 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
420 
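	/* Poll until the BUSY bit clears; give up after 1000 * 10us = 10ms. */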
421 	for (cnt = 0; cnt < 1000; cnt++) {
422 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
423 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
424 			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
425 			    AWIN_GMAC_MAC_MIIDATA);
426 			break;
427 		}
428 		delay(10);
429 	}
430 
431 	mutex_exit(&sc->sc_mdio_lock);
432 
433 	if (cnt >= 1000)
434 		return ETIMEDOUT;
435 
436 	return 0;
437 }
438 
439 static int
440 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
441 {
442 	struct dwc_gmac_softc * const sc = device_private(self);
443 	uint16_t mii;
444 	size_t cnt;
445 
446 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
447 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
448 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
449 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
450 
451 	mutex_enter(&sc->sc_mdio_lock);
452 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
453 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
454 
455 	for (cnt = 0; cnt < 1000; cnt++) {
456 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
457 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
458 			break;
459 		delay(10);
460 	}
461 
462 	mutex_exit(&sc->sc_mdio_lock);
463 
464 	if (cnt >= 1000)
465 		return ETIMEDOUT;
466 
467 	return 0;
468 }
469 
470 static int
471 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
472 	struct dwc_gmac_rx_ring *ring)
473 {
474 	struct dwc_gmac_rx_data *data;
475 	bus_addr_t physaddr;
476 	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
477 	int error, i, next;
478 
479 	ring->r_cur = ring->r_next = 0;
480 	memset(ring->r_desc, 0, descsize);
481 
482 	/*
483 	 * Pre-allocate Rx buffers and populate Rx ring.
484 	 */
485 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
486 		struct dwc_gmac_dev_dmadesc *desc;
487 
488 		data = &sc->sc_rxq.r_data[i];
489 
490 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
491 		if (data->rd_m == NULL) {
492 			aprint_error_dev(sc->sc_dev,
493 			    "could not allocate rx mbuf #%d\n", i);
494 			error = ENOMEM;
495 			goto fail;
496 		}
497 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
498 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
499 		if (error != 0) {
500 			aprint_error_dev(sc->sc_dev,
501 			    "could not create DMA map\n");
502 			data->rd_map = NULL;
503 			goto fail;
504 		}
505 		MCLGET(data->rd_m, M_DONTWAIT);
506 		if (!(data->rd_m->m_flags & M_EXT)) {
507 			aprint_error_dev(sc->sc_dev,
508 			    "could not allocate mbuf cluster #%d\n", i);
509 			error = ENOMEM;
510 			goto fail;
511 		}
512 		data->rd_m->m_len = data->rd_m->m_pkthdr.len
513 		    = data->rd_m->m_ext.ext_size;
514 		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
515 			data->rd_m->m_len = data->rd_m->m_pkthdr.len
516 			    = AWGE_MAX_PACKET;
517 		}
518 
519 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
520 		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
521 		if (error != 0) {
522 			    "could not load rx buf DMA map #%d\n", i);
523 			    "could not load rx buf DMA map #%d", i);
524 			goto fail;
525 		}
526 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
527 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
528 		physaddr = data->rd_map->dm_segs[0].ds_addr;
529 
530 		desc = &sc->sc_rxq.r_desc[i];
531 		desc->ddesc_data = htole32(physaddr);
532 		next = RX_NEXT(i);
533 		desc->ddesc_next = htole32(ring->r_physaddr
534 		    + next * sizeof(*desc));
535 		sc->sc_descm->rx_init_flags(desc);
536 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
537 		sc->sc_descm->rx_set_owned_by_dev(desc);
538 	}
539 
540 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
541 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
542 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
543 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
544 	    ring->r_physaddr);
545 
546 	return 0;
547 
548 fail:
549 	dwc_gmac_free_rx_ring(sc, ring);
550 	return error;
551 }
552 
553 static void
554 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
555 	struct dwc_gmac_rx_ring *ring)
556 {
557 	struct dwc_gmac_dev_dmadesc *desc;
558 	struct dwc_gmac_rx_data *data;
559 	int i;
560 
561 	mutex_enter(&ring->r_mtx);
562 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
563 		desc = &sc->sc_rxq.r_desc[i];
564 		data = &sc->sc_rxq.r_data[i];
565 		sc->sc_descm->rx_init_flags(desc);
566 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
567 		sc->sc_descm->rx_set_owned_by_dev(desc);
568 	}
569 
570 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
571 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
572 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
573 
574 	ring->r_cur = ring->r_next = 0;
575 	/* reset DMA address to start of ring */
576 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
577 	    sc->sc_rxq.r_physaddr);
578 	mutex_exit(&ring->r_mtx);
579 }
580 
581 static int
582 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
583 {
584 	const size_t descsize = AWGE_TOTAL_RING_COUNT *
585 		sizeof(struct dwc_gmac_dev_dmadesc);
586 	int error, nsegs;
587 	void *rings;
588 
589 	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
590 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
591 	if (error != 0) {
592 		aprint_error_dev(sc->sc_dev,
593 		    "could not create desc DMA map\n");
594 		sc->sc_dma_ring_map = NULL;
595 		goto fail;
596 	}
597 
598 	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
599 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
600 	if (error != 0) {
601 		aprint_error_dev(sc->sc_dev,
602 		    "could not allocate DMA memory\n");
603 		goto fail;
604 	}
605 
606 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
607 	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
608 	if (error != 0) {
609 		aprint_error_dev(sc->sc_dev,
610 		    "could not map DMA memory\n");
611 		goto fail;
612 	}
613 
614 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
615 	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
616 	if (error != 0) {
617 		aprint_error_dev(sc->sc_dev,
618 		    "could not load desc DMA map\n");
619 		goto fail;
620 	}
621 
622 	/* give first AWGE_RX_RING_COUNT to the RX side */
623 	sc->sc_rxq.r_desc = rings;
624 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
625 
626 	/* and next rings to the TX side */
627 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
628 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
629 	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);
630 
631 	return 0;
632 
633 fail:
634 	dwc_gmac_free_dma_rings(sc);
635 	return error;
636 }
637 
638 static void
639 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
640 {
641 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
642 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
643 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
644 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
645 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
646 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
647 }
648 
649 static void
650 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
651 {
652 	struct dwc_gmac_rx_data *data;
653 	int i;
654 
655 	if (ring->r_desc == NULL)
656 		return;
657 
658 
659 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
660 		data = &ring->r_data[i];
661 
662 		if (data->rd_map != NULL) {
663 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
664 			    data->rd_map->dm_mapsize,
666 			    BUS_DMASYNC_POSTREAD);
667 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
668 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
669 		}
670 		if (data->rd_m != NULL)
671 			m_freem(data->rd_m);
672 	}
673 }
674 
675 static int
676 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
677 	struct dwc_gmac_tx_ring *ring)
678 {
679 	int i, error = 0;
680 
681 	ring->t_queued = 0;
682 	ring->t_cur = ring->t_next = 0;
683 
684 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
685 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
686 	    TX_DESC_OFFSET(0),
687 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
688 	    BUS_DMASYNC_POSTWRITE);
689 
690 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
691 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
692 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
693 		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
694 		    &ring->t_data[i].td_map);
695 		if (error != 0) {
696 			aprint_error_dev(sc->sc_dev,
697 			    "could not create TX DMA map #%d\n", i);
698 			ring->t_data[i].td_map = NULL;
699 			goto fail;
700 		}
701 		ring->t_desc[i].ddesc_next = htole32(
702 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
703 		    * TX_NEXT(i));
704 	}
705 
706 	return 0;
707 
708 fail:
709 	dwc_gmac_free_tx_ring(sc, ring);
710 	return error;
711 }
712 
713 static void
714 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
715 {
716 	/* 'end' is pointing one descriptor beyond the last we want to sync */
717 	if (end > start) {
718 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
719 		    TX_DESC_OFFSET(start),
720 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
721 		    ops);
722 		return;
723 	}
724 	/* sync from 'start' to end of ring */
725 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
726 	    TX_DESC_OFFSET(start),
727 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
728 	    ops);
729 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
730 		/* sync from start of ring to 'end' */
731 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
732 		    TX_DESC_OFFSET(0),
733 		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
734 		    ops);
735 	}
736 }
737 
738 static void
739 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
740 	struct dwc_gmac_tx_ring *ring)
741 {
742 	int i;
743 
744 	mutex_enter(&ring->t_mtx);
745 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
746 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
747 
748 		if (data->td_m != NULL) {
749 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
750 			    0, data->td_active->dm_mapsize,
751 			    BUS_DMASYNC_POSTWRITE);
752 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
753 			m_freem(data->td_m);
754 			data->td_m = NULL;
755 		}
756 	}
757 
758 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
759 	    TX_DESC_OFFSET(0),
760 	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
761 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
762 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
763 	    sc->sc_txq.t_physaddr);
764 
765 	ring->t_queued = 0;
766 	ring->t_cur = ring->t_next = 0;
767 	mutex_exit(&ring->t_mtx);
768 }
769 
770 static void
771 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
772 	struct dwc_gmac_tx_ring *ring)
773 {
774 	int i;
775 
776 	/* unload the maps */
777 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
778 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
779 
780 		if (data->td_m != NULL) {
781 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
782 			    0, data->td_map->dm_mapsize,
783 			    BUS_DMASYNC_POSTWRITE);
784 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
785 			m_freem(data->td_m);
786 			data->td_m = NULL;
787 		}
788 	}
789 
790 	/* and actually free them */
791 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
792 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
793 
794 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
795 	}
796 }
797 
798 static void
799 dwc_gmac_miibus_statchg(struct ifnet *ifp)
800 {
801 	struct dwc_gmac_softc * const sc = ifp->if_softc;
802 	struct mii_data * const mii = &sc->sc_mii;
803 	uint32_t conf, flow;
804 
805 	/*
806 	 * Set MII or GMII interface based on the speed
807 	 * negotiated by the PHY.
808 	 */
809 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
810 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
811 	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
812 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
813 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
814 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
815 	    | AWIN_GMAC_MAC_CONF_RXENABLE
816 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
817 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
818 	case IFM_10_T:
819 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
820 		break;
821 	case IFM_100_TX:
822 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
823 			AWIN_GMAC_MAC_CONF_MIISEL;
824 		break;
825 	case IFM_1000_T:
826 		break;
827 	}
828 	if (sc->sc_set_speed)
829 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
830 
831 	flow = 0;
832 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
833 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
834 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
835 	}
836 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
837 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
838 	}
839 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
840 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
841 	}
842 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
843 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
844 
845 #ifdef DWC_GMAC_DEBUG
846 	aprint_normal_dev(sc->sc_dev,
847 	    "setting MAC conf register: %08x\n", conf);
848 #endif
849 
850 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
851 	    AWIN_GMAC_MAC_CONF, conf);
852 }
853 
854 static int
855 dwc_gmac_init(struct ifnet *ifp)
856 {
857 	struct dwc_gmac_softc *sc = ifp->if_softc;
858 
859 	mutex_enter(sc->sc_lock);
860 	int ret = dwc_gmac_init_locked(ifp);
861 	mutex_exit(sc->sc_lock);
862 
863 	return ret;
864 }
865 
866 static int
867 dwc_gmac_init_locked(struct ifnet *ifp)
868 {
869 	struct dwc_gmac_softc *sc = ifp->if_softc;
870 	uint32_t ffilt;
871 
872 	if (ifp->if_flags & IFF_RUNNING)
873 		return 0;
874 
875 	dwc_gmac_stop_locked(ifp, 0);
876 
877 	/*
878 	 * Configure DMA burst/transfer mode and RX/TX priorities.
879 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
880 	 */
881 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
882 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
883 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
884 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
885 
886 	/*
887 	 * Set up address filter
888 	 */
889 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
890 	if (ifp->if_flags & IFF_PROMISC) {
891 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
892 	} else {
893 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
894 	}
895 	if (ifp->if_flags & IFF_BROADCAST) {
896 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
897 	} else {
898 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
899 	}
900 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
901 
902 	/*
903 	 * Set up multicast filter
904 	 */
905 	dwc_gmac_setmulti(sc);
906 
907 	/*
908 	 * Set up dma pointer for RX and TX ring
909 	 */
910 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
911 	    sc->sc_rxq.r_physaddr);
912 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
913 	    sc->sc_txq.t_physaddr);
914 
915 	/*
916 	 * Start RX/TX part
917 	 */
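	/* Use store-and-forward unless the glue forced threshold DMA mode. */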
918 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
919 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
920 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
921 	}
922 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
923 
924 	sc->sc_stopping = false;
925 
926 	ifp->if_flags |= IFF_RUNNING;
927 	sc->sc_txbusy = false;
928 
929 	return 0;
930 }
931 
932 static void
933 dwc_gmac_start(struct ifnet *ifp)
934 {
935 	struct dwc_gmac_softc *sc = ifp->if_softc;
936 #ifdef DWCGMAC_MPSAFE
937 	KASSERT(if_is_mpsafe(ifp));
938 #endif
939 
940 	mutex_enter(sc->sc_lock);
941 	if (!sc->sc_stopping) {
942 		mutex_enter(&sc->sc_txq.t_mtx);
943 		dwc_gmac_start_locked(ifp);
944 		mutex_exit(&sc->sc_txq.t_mtx);
945 	}
946 	mutex_exit(sc->sc_lock);
947 }
948 
949 static void
950 dwc_gmac_start_locked(struct ifnet *ifp)
951 {
952 	struct dwc_gmac_softc *sc = ifp->if_softc;
953 	int old = sc->sc_txq.t_queued;
954 	int start = sc->sc_txq.t_cur;
955 	struct mbuf *m0;
956 
957 	if ((ifp->if_flags & IFF_RUNNING) == 0)
958 		return;
959 	if (sc->sc_txbusy)
960 		return;
961 
962 	for (;;) {
963 		IFQ_POLL(&ifp->if_snd, m0);
964 		if (m0 == NULL)
965 			break;
966 		if (dwc_gmac_queue(sc, m0) != 0) {
967 			sc->sc_txbusy = true;
968 			break;
969 		}
970 		IFQ_DEQUEUE(&ifp->if_snd, m0);
971 		bpf_mtap(ifp, m0, BPF_D_OUT);
972 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
973 			sc->sc_txbusy = true;
974 			break;
975 		}
976 	}
977 
978 	if (sc->sc_txq.t_queued != old) {
979 		/* packets have been queued, kick it off */
980 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
981 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
982 
983 #ifdef DWC_GMAC_DEBUG
984 		dwc_dump_status(sc);
985 #endif
986 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
987 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
988 	}
989 }
990 
991 static void
992 dwc_gmac_stop(struct ifnet *ifp, int disable)
993 {
994 	struct dwc_gmac_softc *sc = ifp->if_softc;
995 
996 	mutex_enter(sc->sc_lock);
997 	dwc_gmac_stop_locked(ifp, disable);
998 	mutex_exit(sc->sc_lock);
999 }
1000 
1001 static void
1002 dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
1003 {
1004 	struct dwc_gmac_softc *sc = ifp->if_softc;
1005 
1006 	sc->sc_stopping = true;
1007 
1008 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1009 	    AWIN_GMAC_DMA_OPMODE,
1010 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1011 		AWIN_GMAC_DMA_OPMODE)
1012 		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
1013 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1014 	    AWIN_GMAC_DMA_OPMODE,
1015 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1016 		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
1017 
1018 	mii_down(&sc->sc_mii);
1019 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
1020 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
1021 
1022 	ifp->if_flags &= ~IFF_RUNNING;
1023 	sc->sc_txbusy = false;
1024 }
1025 
1026 /*
1027  * Add m0 to the TX ring
1028  */
1029 static int
1030 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
1031 {
1032 	struct dwc_gmac_dev_dmadesc *desc = NULL;
1033 	struct dwc_gmac_tx_data *data = NULL;
1034 	bus_dmamap_t map;
1035 	int error, i, first;
1036 
1037 #ifdef DWC_GMAC_DEBUG
1038 	aprint_normal_dev(sc->sc_dev,
1039 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
1040 #endif
1041 
1042 	first = sc->sc_txq.t_cur;
1043 	map = sc->sc_txq.t_data[first].td_map;
1044 
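	/*
	 * Load the mbuf via the DMA map pre-allocated for the first
	 * descriptor slot; map and mbuf are recorded on the last slot
	 * used once all segments have been queued below.
	 */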
1045 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
1046 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1047 	if (error != 0) {
1048 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
1049 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
1050 		return error;
1051 	}
1052 
1053 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
1054 		bus_dmamap_unload(sc->sc_dmat, map);
1055 		return ENOBUFS;
1056 	}
1057 
1058 	for (i = 0; i < map->dm_nsegs; i++) {
1059 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
1060 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
1061 
1062 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
1063 
1064 #ifdef DWC_GMAC_DEBUG
1065 		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
1066 		    "len %lu\n", sc->sc_txq.t_cur,
1067 		    (unsigned long)map->dm_segs[i].ds_addr,
1068 		    (unsigned long)map->dm_segs[i].ds_len);
1069 #endif
1070 
1071 		sc->sc_descm->tx_init_flags(desc);
1072 		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
1073 
1074 		if (i == 0)
1075 			sc->sc_descm->tx_set_first_frag(desc);
1076 
1077 		/*
1078 		 * Defer passing ownership of the first descriptor
1079 		 * until we are done.
1080 		 */
1081 		if (i != 0)
1082 			sc->sc_descm->tx_set_owned_by_dev(desc);
1083 
1084 		sc->sc_txq.t_queued++;
1085 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
1086 	}
1087 
1088 	sc->sc_descm->tx_set_last_frag(desc);
1089 
1090 	data->td_m = m0;
1091 	data->td_active = map;
1092 
1093 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1094 	    BUS_DMASYNC_PREWRITE);
1095 
1096 	/* Pass first to device */
1097 	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
1101 
1102 	return 0;
1103 }
1104 
1105 /*
1106  * If the interface is up and running, only modify the receive
1107  * filter when setting promiscuous or debug mode.  Otherwise fall
1108  * through to ether_ioctl, which will reset the chip.
1109  */
1110 static int
1111 dwc_gmac_ifflags_cb(struct ethercom *ec)
1112 {
1113 	struct ifnet *ifp = &ec->ec_if;
1114 	struct dwc_gmac_softc *sc = ifp->if_softc;
1115 	int ret = 0;
1116 
1117 	mutex_enter(sc->sc_lock);
1118 	u_short change = ifp->if_flags ^ sc->sc_if_flags;
1119 	sc->sc_if_flags = ifp->if_flags;
1120 
1121 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
1122 		ret = ENETRESET;
1123 		goto out;
1124 	}
1125 	if ((change & IFF_PROMISC) != 0) {
1126 		dwc_gmac_setmulti(sc);
1127 	}
1128 out:
1129 	mutex_exit(sc->sc_lock);
1130 
1131 	return ret;
1132 }
1133 
1134 static int
1135 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1136 {
1137 	struct dwc_gmac_softc *sc = ifp->if_softc;
1138 	int error = 0;
1139 
1140 	int s = splnet();
1141 	error = ether_ioctl(ifp, cmd, data);
1142 
1143 #ifdef DWCGMAC_MPSAFE
1144 	splx(s);
1145 #endif
1146 
1147 	if (error == ENETRESET) {
1148 		error = 0;
1149 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1150 			;
1151 		else if (ifp->if_flags & IFF_RUNNING) {
1152 			/*
1153 			 * Multicast list has changed; set the hardware filter
1154 			 * accordingly.
1155 			 */
1156 			mutex_enter(sc->sc_lock);
1157 			dwc_gmac_setmulti(sc);
1158 			mutex_exit(sc->sc_lock);
1159 		}
1160 	}
1161 
1162 	/* Try to get things going again */
1163 	if (ifp->if_flags & IFF_UP)
1164 		dwc_gmac_start(ifp);
1165 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1166 
1167 #ifndef DWCGMAC_MPSAFE
1168 	splx(s);
1169 #endif
1170 
1171 	return error;
1172 }
1173 
1174 static void
1175 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
1176 {
1177 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1178 	struct dwc_gmac_tx_data *data;
1179 	struct dwc_gmac_dev_dmadesc *desc;
1180 	int i, nsegs;
1181 
1182 	mutex_enter(&sc->sc_txq.t_mtx);
1183 
1184 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
1185 #ifdef DWC_GMAC_DEBUG
1186 		aprint_normal_dev(sc->sc_dev,
1187 		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
1188 		    i, sc->sc_txq.t_queued);
1189 #endif
1190 
1191 		/*
1192 		 * i + 1 does not need to be a valid descriptor index;
1193 		 * it is just a convenient way to make dwc_gmac_txdesc_sync()
1194 		 * sync a single TX descriptor (i).
1195 		 */
1196 		dwc_gmac_txdesc_sync(sc, i, i + 1,
1197 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1198 
1199 		desc = &sc->sc_txq.t_desc[i];
1200 		if (sc->sc_descm->tx_is_owned_by_dev(desc))
1201 			break;
1202 
1203 		data = &sc->sc_txq.t_data[i];
1204 		if (data->td_m == NULL)
1205 			continue;
1206 
1207 		if_statinc(ifp, if_opackets);
1208 		nsegs = data->td_active->dm_nsegs;
1209 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
1210 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1211 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
1212 
1213 #ifdef DWC_GMAC_DEBUG
1214 		aprint_normal_dev(sc->sc_dev,
1215 		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
1216 		    "freeing mbuf %p\n", i, data->td_m);
1217 #endif
1218 
1219 		m_freem(data->td_m);
1220 		data->td_m = NULL;
1221 
1222 		sc->sc_txq.t_queued -= nsegs;
1223 	}
1224 
1225 	sc->sc_txq.t_next = i;
1226 
1227 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
1228 		sc->sc_txbusy = false;
1229 	}
1230 	mutex_exit(&sc->sc_txq.t_mtx);
1231 }
1232 
1233 static void
1234 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
1235 {
1236 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1237 	struct dwc_gmac_dev_dmadesc *desc;
1238 	struct dwc_gmac_rx_data *data;
1239 	bus_addr_t physaddr;
1240 	struct mbuf *m, *mnew;
1241 	int i, len, error;
1242 
1243 	mutex_enter(&sc->sc_rxq.r_mtx);
1244 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
1245 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1246 		    RX_DESC_OFFSET(i), sizeof(*desc),
1247 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1248 		desc = &sc->sc_rxq.r_desc[i];
1249 		data = &sc->sc_rxq.r_data[i];
1250 
1251 		if (sc->sc_descm->rx_is_owned_by_dev(desc))
1252 			break;
1253 
1254 		if (sc->sc_descm->rx_has_error(desc)) {
1255 #ifdef DWC_GMAC_DEBUG
1256 			aprint_normal_dev(sc->sc_dev,
1257 			    "RX error: descriptor status %08x, skipping\n",
1258 			    le32toh(desc->ddesc_status0));
1259 #endif
1260 			if_statinc(ifp, if_ierrors);
1261 			goto skip;
1262 		}
1263 
1264 		len = sc->sc_descm->rx_get_len(desc);
1265 
1266 #ifdef DWC_GMAC_DEBUG
1267 		aprint_normal_dev(sc->sc_dev,
1268 		    "rx int: device is done with descriptor #%d, len: %d\n",
1269 		    i, len);
1270 #endif
1271 
1272 		/*
1273 		 * Try to get a new mbuf before passing this one
1274 		 * up, if that fails, drop the packet and reuse
1275 		 * the existing one.
1276 		 */
1277 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1278 		if (mnew == NULL) {
1279 			if_statinc(ifp, if_ierrors);
1280 			goto skip;
1281 		}
1282 		MCLGET(mnew, M_DONTWAIT);
1283 		if ((mnew->m_flags & M_EXT) == 0) {
1284 			m_freem(mnew);
1285 			if_statinc(ifp, if_ierrors);
1286 			goto skip;
1287 		}
1288 		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
1289 		if (mnew->m_len > AWGE_MAX_PACKET) {
1290 			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
1291 		}
1292 
1293 		/* unload old DMA map */
1294 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1295 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1296 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
1297 
1298 		/* and reload with new mbuf */
1299 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
1300 		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
1301 		if (error != 0) {
1302 			m_freem(mnew);
1303 			/* try to reload old mbuf */
1304 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
1305 			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1306 			if (error != 0) {
1307 				panic("%s: could not load old rx mbuf",
1308 				    device_xname(sc->sc_dev));
1309 			}
1310 			if_statinc(ifp, if_ierrors);
1311 			goto skip;
1312 		}
1313 		physaddr = data->rd_map->dm_segs[0].ds_addr;
1314 
1315 		/*
1316 		 * New mbuf loaded, update RX ring and continue
1317 		 */
1318 		m = data->rd_m;
1319 		data->rd_m = mnew;
1320 		desc->ddesc_data = htole32(physaddr);
1321 
1322 		/* finalize mbuf */
1323 		m->m_pkthdr.len = m->m_len = len;
1324 		m_set_rcvif(m, ifp);
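		/* frame still carries the FCS; let the stack strip it */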
1325 		m->m_flags |= M_HASFCS;
1326 
1327 		if_percpuq_enqueue(sc->sc_ipq, m);
1328 
1329 skip:
1330 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1331 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1332 
1333 		sc->sc_descm->rx_init_flags(desc);
1334 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
1335 		sc->sc_descm->rx_set_owned_by_dev(desc);
1336 
1337 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1338 		    RX_DESC_OFFSET(i), sizeof(*desc),
1339 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1340 	}
1341 
1342 	/* update RX pointer */
1343 	sc->sc_rxq.r_cur = i;
1344 
1345 	mutex_exit(&sc->sc_rxq.r_mtx);
1346 }
1347 
1348 static void
1349 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
1350 {
1351 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
1352 	struct ether_multi *enm;
1353 	struct ether_multistep step;
1354 	struct ethercom *ec = &sc->sc_ec;
1355 	uint32_t hashes[2] = { 0, 0 };
1356 	uint32_t ffilt, h;
1357 	int mcnt;
1358 
1359 	KASSERT(mutex_owned(sc->sc_lock));
1360 
1361 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
1362 
1363 	if (ifp->if_flags & IFF_PROMISC) {
1364 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
1365 		goto special_filter;
1366 	}
1367 
1368 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
1369 
1370 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
1371 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
1372 
1373 	ETHER_LOCK(ec);
1374 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
1375 	ETHER_FIRST_MULTI(step, ec, enm);
1376 	mcnt = 0;
1377 	while (enm != NULL) {
1378 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1379 		    ETHER_ADDR_LEN) != 0) {
1380 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
1381 			ec->ec_flags |= ETHER_F_ALLMULTI;
1382 			ETHER_UNLOCK(ec);
1383 			goto special_filter;
1384 		}
1385 
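		/*
		 * The hash index is the top 6 bits of the complemented
		 * big-endian CRC32 of the address: bit 5 selects
		 * HTHIGH vs. HTLOW, the low 5 bits the bit within.
		 */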
1386 		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
1387 		hashes[h >> 5] |= (1 << (h & 0x1f));
1388 
1389 		mcnt++;
1390 		ETHER_NEXT_MULTI(step, enm);
1391 	}
1392 	ETHER_UNLOCK(ec);
1393 
1394 	if (mcnt)
1395 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
1396 	else
1397 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
1398 
1399 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
1400 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1401 	    hashes[0]);
1402 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1403 	    hashes[1]);
1404 	sc->sc_if_flags = ifp->if_flags;
1405 
1406 #ifdef DWC_GMAC_DEBUG
1407 	dwc_gmac_dump_ffilt(sc, ffilt);
1408 #endif
1409 	return;
1410 
1411 special_filter:
1412 #ifdef DWC_GMAC_DEBUG
1413 	dwc_gmac_dump_ffilt(sc, ffilt);
1414 #endif
1415 	/* no MAC hashes, ALLMULTI or PROMISC */
1416 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
1417 	    ffilt);
1418 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1419 	    0xffffffff);
1420 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1421 	    0xffffffff);
1422 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1423 }
1424 
1425 int
1426 dwc_gmac_intr(struct dwc_gmac_softc *sc)
1427 {
1428 	uint32_t status, dma_status;
1429 	int rv = 0;
1430 
1431 	if (sc->sc_stopping)
1432 		return 0;
1433 
1434 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
1435 	if (status & AWIN_GMAC_MII_IRQ) {
1436 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1437 		    AWIN_GMAC_MII_STATUS);
1438 		rv = 1;
1439 		mii_pollstat(&sc->sc_mii);
1440 	}
1441 
1442 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1443 	    AWIN_GMAC_DMA_STATUS);
1444 
1445 	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
1446 		rv = 1;
1447 
1448 	if (dma_status & GMAC_DMA_INT_TIE)
1449 		dwc_gmac_tx_intr(sc);
1450 
1451 	if (dma_status & GMAC_DMA_INT_RIE)
1452 		dwc_gmac_rx_intr(sc);
1453 
1454 	/*
1455 	 * Check error conditions
1456 	 */
1457 	if (dma_status & GMAC_DMA_INT_ERRORS) {
1458 		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
1459 #ifdef DWC_GMAC_DEBUG
1460 		dwc_dump_and_abort(sc, "interrupt error condition");
1461 #endif
1462 	}
1463 
1464 	rnd_add_uint32(&sc->rnd_source, dma_status);
1465 
1466 	/* ack interrupt */
1467 	if (dma_status)
1468 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1469 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1470 
1471 	/*
1472 	 * Get more packets
1473 	 */
1474 	if (rv)
1475 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
1476 
1477 	return rv;
1478 }
1479 
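/*
 * The OWN bit lives in the same word and position in both descriptor
 * layouts, so a single pair of ownership helpers serves both method
 * tables.
 */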
1480 static void
1481 dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1482 {
1483 
1484 	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
1485 }
1486 
1487 static int
1488 dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1489 {
1490 
1491 	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
1492 }
1493 
1494 static void
1495 dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1496 {
1497 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
1498 
1499 	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
1500 		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
1501 }
1502 
1503 static uint32_t
1504 dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
1505 {
1506 
1507 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
1508 }
1509 
1510 static void
1511 dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1512 {
1513 
1514 	desc->ddesc_status0 = 0;
1515 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1516 }
1517 
1518 static void
1519 dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1520 {
1521 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
1522 
1523 	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
1524 }
1525 
1526 static void
1527 dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1528 {
1529 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
1530 
1531 	desc->ddesc_cntl1 = htole32(cntl |
1532 		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
1533 }
1534 
1535 static void
1536 dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1537 {
1538 
1539 	desc->ddesc_status0 = 0;
1540 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1541 }
1542 
1543 static int
1544 dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

1545 	return !!(le32toh(desc->ddesc_status0) &
1546 		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
1547 }
1548 
1549 static void
1550 dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1551 {
1552 	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);
1553 
1554 	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
1555 		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
1556 }
1557 
1558 static uint32_t
1559 dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
1560 {
1561 
1562 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
1563 }
1564 
1565 static void
1566 dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1567 {
1568 
1569 	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
1570 	desc->ddesc_cntl1 = 0;
1571 }
1572 
1573 static void
1574 dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1575 {
1576 	uint32_t tdes0 = le32toh(desc->ddesc_status0);
1577 
1578 	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
1579 }
1580 
1581 static void
1582 dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1583 {
1584 	uint32_t tdes0 = le32toh(desc->ddesc_status0);
1585 
1586 	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
1587 }
1588 
1589 static void
1590 dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1591 {
1592 
1593 	desc->ddesc_status0 = 0;
1594 	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
1595 }
1596 
1597 static int
1598 dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
1599 {
1600 
1601 	return !!(le32toh(desc->ddesc_status0) &
1602 		(DDESC_RDES0_ES | DDESC_RDES0_LE));
1603 }
1604 
1605 #ifdef DWC_GMAC_DEBUG
1606 static void
1607 dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1608 {
1609 	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1610 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1611 	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1612 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1613 	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1614 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1615 	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1616 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1617 	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1618 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1619 	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1620 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1621 	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1622 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1623 	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1624 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1625 	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1626 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1627 	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1628 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1629 	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1630 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1631 	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1632 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1633 }
1634 
1635 static void
1636 dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1637 {
1638 	int i;
1639 
1640 	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1641 	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1642 	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1643 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1644 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1645 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1646 		    "data: %08x next: %08x\n",
1647 		    i, sc->sc_txq.t_physaddr +
1648 			i * sizeof(struct dwc_gmac_dev_dmadesc),
1649 		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1650 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1651 	}
1652 }
1653 
1654 static void
1655 dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1656 {
1657 	int i;
1658 
1659 	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1660 	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1661 	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1662 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1663 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1664 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1665 		    "data: %08x next: %08x\n",
1666 		    i, sc->sc_rxq.r_physaddr +
1667 			i * sizeof(struct dwc_gmac_dev_dmadesc),
1668 		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1669 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1670 	}
1671 }
1672 
1673 static void
1674 dwc_dump_status(struct dwc_gmac_softc *sc)
1675 {
1676 	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1677 	     AWIN_GMAC_MAC_INTR);
1678 	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1679 	     AWIN_GMAC_DMA_STATUS);
1680 	char buf[200];
1681 
1682 	/* print interrupt state */
1683 	snprintb(buf, sizeof(buf), "\177\20"
1684 	    "b\x10""NI\0"
1685 	    "b\x0f""AI\0"
1686 	    "b\x0e""ER\0"
1687 	    "b\x0d""FB\0"
1688 	    "b\x0a""ET\0"
1689 	    "b\x09""RW\0"
1690 	    "b\x08""RS\0"
1691 	    "b\x07""RU\0"
1692 	    "b\x06""RI\0"
1693 	    "b\x05""UN\0"
1694 	    "b\x04""OV\0"
1695 	    "b\x03""TJ\0"
1696 	    "b\x02""TU\0"
1697 	    "b\x01""TS\0"
1698 	    "b\x00""TI\0"
1699 	    "\0", dma_status);
1700 	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1701 	    status, buf);
1702 }
1703 
1704 static void
1705 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1706 {
1707 	dwc_dump_status(sc);
1708 	dwc_gmac_dump_ffilt(sc,
1709 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1710 	dwc_gmac_dump_dma(sc);
1711 	dwc_gmac_dump_tx_desc(sc);
1712 	dwc_gmac_dump_rx_desc(sc);
1713 
1714 	panic("%s", msg);
1715 }
1716 
1717 static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1718 {
1719 	char buf[200];
1720 
1721 	/* print filter setup */
1722 	snprintb(buf, sizeof(buf), "\177\20"
1723 	    "b\x1f""RA\0"
1724 	    "b\x0a""HPF\0"
1725 	    "b\x09""SAF\0"
1726 	    "b\x08""SAIF\0"
1727 	    "b\x05""DBF\0"
1728 	    "b\x04""PM\0"
1729 	    "b\x03""DAIF\0"
1730 	    "b\x02""HMC\0"
1731 	    "b\x01""HUC\0"
1732 	    "b\x00""PR\0"
1733 	    "\0", ffilt);
1734 	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1735 }
1736 #endif
1737