/* $NetBSD: dwc_gmac.c,v 1.88 2024/07/05 04:31:51 rin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.88 2024/07/05 04:31:51 rin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *,
    uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};


#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define	RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
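
/*
 * Illustrative note (not driver code): both ring sizes are powers of
 * two, so the *_NEXT() macros can wrap with a mask instead of a
 * modulo.  Descriptors are four 32-bit words (16 bytes), and the TX
 * descriptors sit directly behind the RX descriptors in one shared
 * DMA map, hence e.g.
 *
 *	TX_DESC_OFFSET(0) == AWGE_RX_RING_COUNT * 16
 *	TX_NEXT(AWGE_TX_RING_COUNT - 1) == 0
 */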

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE |	\
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface.
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}
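
/*
 * Usage sketch (hypothetical front-end, not part of this file): a bus
 * attachment driver is expected to map the registers, fill in sc_dev,
 * sc_bst, sc_bsh and sc_dmat, establish the interrupt, and only then
 * call dwc_gmac_attach(), roughly:
 *
 *	sc->sc_dev = self;
 *	sc->sc_bst = faa->faa_bst;		// parent bus space tag
 *	sc->sc_dmat = faa->faa_dmat;		// parent DMA tag
 *	bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh);
 *	// ... establish the interrupt, wiring it to dwc_gmac_intr(sc)
 *	dwc_gmac_attach(sc, MII_PHY_ANY, mii_clk);
 *
 * "faa", "addr", "size" and "mii_clk" are placeholders; the MDIO
 * clock divider value in particular is board specific.
 */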

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}
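
/*
 * Timing note: the reset loop above polls up to 30000 times with a
 * 10us delay, i.e. it gives the core at most ~300ms to clear
 * GMAC_BUSMODE_RESET before giving up.
 */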

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}
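
/*
 * Worked example (illustration only): for 00:11:22:33:44:55,
 * enaddr[] is { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, so
 *
 *	lo = 0x00 | 0x11 << 8 | 0x22 << 16 | 0x33 << 24 == 0x33221100
 *	hi = 0x44 | 0x55 << 8                           == 0x5544
 *
 * i.e. the bytes are stored little-endian in ADDR0LO/ADDR0HI, which
 * matches the unpacking done in dwc_gmac_attach() when no externally
 * configured address is found.
 */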

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}
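
/*
 * MDIO access pattern (sketch): both helpers above pack the PHY
 * address, register number and clock divider into the MIIADDR word
 * with __SHIFTIN(), set GMAC_MII_BUSY, and poll until the hardware
 * clears the busy bit again (at most 1000 * 10us == 10ms).
 * __SHIFTIN(v, m) shifts v into the field described by mask m, e.g.
 * with a hypothetical mask __BITS(15,11),
 * __SHIFTIN(3, __BITS(15,11)) yields 3 << 11 == 0x1800.
 */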

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}
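
/*
 * Resulting layout of the shared descriptor memory (sketch):
 *
 *	r_physaddr -> RX desc 0 ... RX desc AWGE_RX_RING_COUNT - 1
 *	t_physaddr -> TX desc 0 ... TX desc AWGE_TX_RING_COUNT - 1
 *
 * with t_physaddr == r_physaddr
 * + AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
 * which is exactly the offset TX_DESC_OFFSET(0) computes.
 */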

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}
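
/*
 * Worked example (assuming AWGE_TX_RING_COUNT == 32, an assumption
 * made only for this illustration): a call with start == 30 and
 * end == 2 syncs descriptors 30..31 in the first bus_dmamap_sync()
 * and descriptors 0..1 in the second; a call with start == 30 and
 * end == 0 skips the second sync entirely, because
 * TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) == 0.
 */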

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up the DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
	    opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (sc->sc_txbusy)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_txbusy = false;
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * to the device until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass the first descriptor to the device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}
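
/*
 * Ordering note (sketch of the handoff protocol used above): the
 * first descriptor is handed to the device last, otherwise the DMA
 * engine could start on a partially built chain.  The sequence is:
 *
 *	1. fill all descriptors, marking only fragments 2..n as
 *	   owned-by-device;
 *	2. sync the data buffers with BUS_DMASYNC_PREWRITE;
 *	3. flip the owned bit of the first descriptor;
 *	4. the caller (dwc_gmac_start_locked()) syncs the descriptor
 *	   ring and writes AWIN_GMAC_DMA_TXPOLL to kick the DMA engine.
 */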

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 does not need to be a valid descriptor;
		 * it is merely a convention that makes the call
		 * below sync the single TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}
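
/*
 * Hash filter math (illustration only): h is the top 6 bits (0..63)
 * of the inverted big-endian CRC32 of the multicast address.  It
 * selects one bit across the two 32-bit hash table registers, e.g.
 * h == 40 sets bit 40 & 0x1f == 8 in hashes[40 >> 5] == hashes[1],
 * which is written to AWIN_GMAC_MAC_HTHIGH above.
 */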

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i * sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i * sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif
1735