xref: /netbsd-src/sys/dev/ic/dwc_gmac.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /* $NetBSD: dwc_gmac.c,v 1.75 2021/09/11 20:28:06 andvar Exp $ */
2 
3 /*-
4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * This driver supports the Synopsys DesignWare GMAC core, as found
34  * on Allwinner A20 SoCs and others.
35  *
36  * Real documentation does not seem to be available; the marketing
37  * product documents can be found here:
38  *
39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
40  */
41 
42 #include <sys/cdefs.h>
43 
44 __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.75 2021/09/11 20:28:06 andvar Exp $");
45 
46 /* #define	DWC_GMAC_DEBUG	1 */
47 
48 #ifdef _KERNEL_OPT
49 #include "opt_inet.h"
50 #include "opt_net_mpsafe.h"
51 #endif
52 
53 #include <sys/param.h>
54 #include <sys/bus.h>
55 #include <sys/device.h>
56 #include <sys/intr.h>
57 #include <sys/systm.h>
58 #include <sys/sockio.h>
59 #include <sys/cprng.h>
60 #include <sys/rndsource.h>
61 
62 #include <net/if.h>
63 #include <net/if_ether.h>
64 #include <net/if_media.h>
65 #include <net/bpf.h>
66 #ifdef INET
67 #include <netinet/if_inarp.h>
68 #endif
69 
70 #include <dev/mii/miivar.h>
71 
72 #include <dev/ic/dwc_gmac_reg.h>
73 #include <dev/ic/dwc_gmac_var.h>
74 
75 static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
76 static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
77 static void dwc_gmac_miibus_statchg(struct ifnet *);
78 
79 static int dwc_gmac_reset(struct dwc_gmac_softc *);
80 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
81 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
82 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
83 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
84 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
85 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
86 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
87 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
88 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
89 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
90 static int dwc_gmac_init(struct ifnet *);
91 static int dwc_gmac_init_locked(struct ifnet *);
92 static void dwc_gmac_stop(struct ifnet *, int);
93 static void dwc_gmac_stop_locked(struct ifnet *, int);
94 static void dwc_gmac_start(struct ifnet *);
95 static void dwc_gmac_start_locked(struct ifnet *);
96 static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
97 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
98 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
99 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
100 static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
101 static int dwc_gmac_ifflags_cb(struct ethercom *);
102 static uint32_t	bitrev32(uint32_t);
103 static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
104 static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
105 static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
106 static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
107 static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
108 static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
109 static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
110 static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
111 static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
112 static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
113 static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
114 static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
115 static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
116 static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
117 static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
118 static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
119 
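/*
 * The core uses one of two DMA descriptor layouts.  The method
 * tables below hide the difference from the rest of the driver;
 * dwc_gmac_attach() selects the enhanced variant when the DMA
 * hardware feature register advertises it.
 */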
120 static const struct dwc_gmac_desc_methods desc_methods_standard = {
121 	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
122 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
123 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
124 	.tx_set_len = dwc_gmac_desc_std_set_len,
125 	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
126 	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
127 	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
128 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
129 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
130 	.rx_set_len = dwc_gmac_desc_std_set_len,
131 	.rx_get_len = dwc_gmac_desc_std_get_len,
132 	.rx_has_error = dwc_gmac_desc_std_rx_has_error
133 };
134 
135 static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
136 	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
137 	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
138 	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
139 	.tx_set_len = dwc_gmac_desc_enh_set_len,
140 	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
141 	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
142 	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
143 	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
144 	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
145 	.rx_set_len = dwc_gmac_desc_enh_set_len,
146 	.rx_get_len = dwc_gmac_desc_enh_get_len,
147 	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
148 };
149 
150 
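/*
 * Both rings live in one DMA allocation: AWGE_RX_RING_COUNT RX
 * descriptors first, followed by AWGE_TX_RING_COUNT TX descriptors.
 * The ring sizes are powers of two, so the successor of an index
 * can be computed by masking instead of a modulo.
 */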
151 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
152 				    *sizeof(struct dwc_gmac_dev_dmadesc))
153 #define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))
154 
155 #define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
156 #define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
157 
158 
159 
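/*
 * Default set of enabled DMA interrupts: TX/RX completion, the
 * normal/abnormal summary bits, fatal bus error and TX underflow.
 */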
160 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
161 				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
162 				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
163 
164 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
165 				GMAC_DMA_INT_FBE |	\
166 				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
167 				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
168 				GMAC_DMA_INT_TJE)
169 
170 #define	AWIN_DEF_MAC_INTRMASK	\
171 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
172 	AWIN_GMAC_MAC_INT_LINKCHG)
173 
174 #ifdef DWC_GMAC_DEBUG
175 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
176 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
177 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
178 static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
179 static void dwc_dump_status(struct dwc_gmac_softc *);
180 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
181 #endif
182 
183 int
184 dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
185 {
186 	uint8_t enaddr[ETHER_ADDR_LEN];
187 	uint32_t maclo, machi, ver, hwft;
188 	struct mii_data * const mii = &sc->sc_mii;
189 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
190 	prop_dictionary_t dict;
191 
192 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
193 	sc->sc_mii_clk = mii_clk & 7;
194 
195 	dict = device_properties(sc->sc_dev);
196 	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
197 	if (ea != NULL) {
198 		/*
199 		 * If the MAC address is overridden by a device property,
200 		 * use that.
201 		 */
202 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
203 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
204 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
205 	} else {
206 		/*
207 		 * If we did not get an externally configured address,
208 		 * try to read one from the current filter setup,
209 		 * before resetting the chip.
210 		 */
211 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
212 		    AWIN_GMAC_MAC_ADDR0LO);
213 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
214 		    AWIN_GMAC_MAC_ADDR0HI);
215 
216 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
217 			/* fabricate a locally administered MAC address */
218 			maclo = 0x00f2 | (cprng_strong32() << 16);
219 			machi = cprng_strong32();
220 		}
221 
222 		enaddr[0] = maclo & 0x0ff;
223 		enaddr[1] = (maclo >> 8) & 0x0ff;
224 		enaddr[2] = (maclo >> 16) & 0x0ff;
225 		enaddr[3] = (maclo >> 24) & 0x0ff;
226 		enaddr[4] = machi & 0x0ff;
227 		enaddr[5] = (machi >> 8) & 0x0ff;
228 	}
229 
230 	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
231 	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);
232 
233 	/*
234 	 * Init chip and do initial setup
235 	 */
236 	if (dwc_gmac_reset(sc) != 0)
237 		return ENXIO;	/* not much to cleanup, haven't attached yet */
238 	dwc_gmac_write_hwaddr(sc, enaddr);
239 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
240 	    ether_sprintf(enaddr));
241 
242 	hwft = 0;
243 	if (ver >= 0x35) {
244 		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
245 		    AWIN_GMAC_DMA_HWFEATURES);
246 		aprint_normal_dev(sc->sc_dev,
247 		    "HW feature mask: %x\n", hwft);
248 	}
249 	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
250 		aprint_normal_dev(sc->sc_dev,
251 		    "Using enhanced descriptor format\n");
252 		sc->sc_descm = &desc_methods_enhanced;
253 	} else {
254 		sc->sc_descm = &desc_methods_standard;
255 	}
256 	if (hwft & GMAC_DMA_FEAT_RMON) {
257 		uint32_t val;
258 
259 		/* Mask all MMC interrupts */
260 		val = 0xffffffff;
261 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
262 		    GMAC_MMC_RX_INT_MSK, val);
263 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
264 		    GMAC_MMC_TX_INT_MSK, val);
265 	}
266 
267 	/*
268 	 * Allocate Tx and Rx rings
269 	 */
270 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
271 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
272 		goto fail;
273 	}
274 
275 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
276 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
277 		goto fail;
278 	}
279 
280 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
281 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
282 		goto fail;
283 	}
284 
285 	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
286 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
287 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
288 
289 	/*
290 	 * Prepare interface data
291 	 */
292 	ifp->if_softc = sc;
293 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
294 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
295 #ifdef DWCGMAC_MPSAFE
296 	ifp->if_extflags = IFEF_MPSAFE;
297 #endif
298 	ifp->if_ioctl = dwc_gmac_ioctl;
299 	ifp->if_start = dwc_gmac_start;
300 	ifp->if_init = dwc_gmac_init;
301 	ifp->if_stop = dwc_gmac_stop;
302 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
303 	IFQ_SET_READY(&ifp->if_snd);
304 
305 	/*
306 	 * Attach MII subdevices
307 	 */
308 	sc->sc_ec.ec_mii = &sc->sc_mii;
309 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
310 	mii->mii_ifp = ifp;
311 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
312 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
313 	mii->mii_statchg = dwc_gmac_miibus_statchg;
314 	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
315 	    MIIF_DOPAUSE);
316 
317 	if (LIST_EMPTY(&mii->mii_phys)) {
318 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
319 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
320 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
321 	} else {
322 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
323 	}
324 
325 	/*
326 	 * We can support 802.1Q VLAN-sized frames.
327 	 */
328 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
329 
330 	/*
331 	 * Ready, attach interface
332 	 */
334 	if_initialize(ifp);
335 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
336 	if_deferred_start_init(ifp, NULL);
337 	ether_ifattach(ifp, enaddr);
338 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
339 	if_register(ifp);
340 	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
341 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
342 
343 	/*
344 	 * Enable interrupts
345 	 */
346 	mutex_enter(sc->sc_lock);
347 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
348 	    AWIN_DEF_MAC_INTRMASK);
349 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
350 	    GMAC_DEF_DMA_INT_MASK);
351 	mutex_exit(sc->sc_lock);
352 
353 	return 0;
354 
355 fail:
356 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
357 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
358 	dwc_gmac_free_dma_rings(sc);
359 	mutex_destroy(&sc->sc_mdio_lock);
360 
361 	return ENXIO;
362 }
363 
364 
365 
366 static int
367 dwc_gmac_reset(struct dwc_gmac_softc *sc)
368 {
369 	size_t cnt;
370 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
371 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
372 	    | GMAC_BUSMODE_RESET);
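	/* Wait up to ~300ms (30000 * 10us) for the reset bit to clear. */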
373 	for (cnt = 0; cnt < 30000; cnt++) {
374 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
375 		    & GMAC_BUSMODE_RESET) == 0)
376 			return 0;
377 		delay(10);
378 	}
379 
380 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
381 	return EIO;
382 }
383 
384 static void
385 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
386     uint8_t enaddr[ETHER_ADDR_LEN])
387 {
388 	uint32_t hi, lo;
389 
390 	hi = enaddr[4] | (enaddr[5] << 8);
391 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
392 	    | ((uint32_t)enaddr[3] << 24);
393 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
394 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
395 }
396 
397 static int
398 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
399 {
400 	struct dwc_gmac_softc * const sc = device_private(self);
401 	uint16_t mii;
402 	size_t cnt;
403 
404 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
405 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
406 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
407 	    | GMAC_MII_BUSY;
408 
409 	mutex_enter(&sc->sc_mdio_lock);
410 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
411 
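	/* Poll up to ~10ms (1000 * 10us) for the busy bit to clear. */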
412 	for (cnt = 0; cnt < 1000; cnt++) {
413 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
414 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
415 			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
416 			    AWIN_GMAC_MAC_MIIDATA);
417 			break;
418 		}
419 		delay(10);
420 	}
421 
422 	mutex_exit(&sc->sc_mdio_lock);
423 
424 	if (cnt >= 1000)
425 		return ETIMEDOUT;
426 
427 	return 0;
428 }
429 
430 static int
431 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
432 {
433 	struct dwc_gmac_softc * const sc = device_private(self);
434 	uint16_t mii;
435 	size_t cnt;
436 
437 	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
438 	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
439 	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
440 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
441 
442 	mutex_enter(&sc->sc_mdio_lock);
443 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
444 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
445 
446 	for (cnt = 0; cnt < 1000; cnt++) {
447 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
448 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
449 			break;
450 		delay(10);
451 	}
452 
453 	mutex_exit(&sc->sc_mdio_lock);
454 
455 	if (cnt >= 1000)
456 		return ETIMEDOUT;
457 
458 	return 0;
459 }
460 
461 static int
462 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
463 	struct dwc_gmac_rx_ring *ring)
464 {
465 	struct dwc_gmac_rx_data *data;
466 	bus_addr_t physaddr;
467 	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
468 	int error, i, next;
469 
470 	ring->r_cur = ring->r_next = 0;
471 	memset(ring->r_desc, 0, descsize);
472 
473 	/*
474 	 * Pre-allocate Rx buffers and populate Rx ring.
475 	 */
476 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
477 		struct dwc_gmac_dev_dmadesc *desc;
478 
479 		data = &sc->sc_rxq.r_data[i];
480 
481 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
482 		if (data->rd_m == NULL) {
483 			aprint_error_dev(sc->sc_dev,
484 			    "could not allocate rx mbuf #%d\n", i);
485 			error = ENOMEM;
486 			goto fail;
487 		}
488 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
489 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
490 		if (error != 0) {
491 			aprint_error_dev(sc->sc_dev,
492 			    "could not create DMA map\n");
493 			data->rd_map = NULL;
494 			goto fail;
495 		}
496 		MCLGET(data->rd_m, M_DONTWAIT);
497 		if (!(data->rd_m->m_flags & M_EXT)) {
498 			aprint_error_dev(sc->sc_dev,
499 			    "could not allocate mbuf cluster #%d\n", i);
500 			error = ENOMEM;
501 			goto fail;
502 		}
503 		data->rd_m->m_len = data->rd_m->m_pkthdr.len
504 		    = data->rd_m->m_ext.ext_size;
505 		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
506 			data->rd_m->m_len = data->rd_m->m_pkthdr.len
507 			    = AWGE_MAX_PACKET;
508 		}
509 
510 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
511 		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
512 		if (error != 0) {
513 			aprint_error_dev(sc->sc_dev,
514 			    "could not load rx buf DMA map #%d\n", i);
515 			goto fail;
516 		}
517 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
518 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
519 		physaddr = data->rd_map->dm_segs[0].ds_addr;
520 
521 		desc = &sc->sc_rxq.r_desc[i];
522 		desc->ddesc_data = htole32(physaddr);
523 		next = RX_NEXT(i);
524 		desc->ddesc_next = htole32(ring->r_physaddr
525 		    + next * sizeof(*desc));
526 		sc->sc_descm->rx_init_flags(desc);
527 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
528 		sc->sc_descm->rx_set_owned_by_dev(desc);
529 	}
530 
531 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
532 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
533 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
534 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
535 	    ring->r_physaddr);
536 
537 	return 0;
538 
539 fail:
540 	dwc_gmac_free_rx_ring(sc, ring);
541 	return error;
542 }
543 
544 static void
545 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
546 	struct dwc_gmac_rx_ring *ring)
547 {
548 	struct dwc_gmac_dev_dmadesc *desc;
549 	struct dwc_gmac_rx_data *data;
550 	int i;
551 
552 	mutex_enter(&ring->r_mtx);
553 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
554 		desc = &sc->sc_rxq.r_desc[i];
555 		data = &sc->sc_rxq.r_data[i];
556 		sc->sc_descm->rx_init_flags(desc);
557 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
558 		sc->sc_descm->rx_set_owned_by_dev(desc);
559 	}
560 
561 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
562 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
563 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
564 
565 	ring->r_cur = ring->r_next = 0;
566 	/* reset DMA address to start of ring */
567 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
568 	    sc->sc_rxq.r_physaddr);
569 	mutex_exit(&ring->r_mtx);
570 }
571 
572 static int
573 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
574 {
575 	const size_t descsize = AWGE_TOTAL_RING_COUNT *
576 		sizeof(struct dwc_gmac_dev_dmadesc);
577 	int error, nsegs;
578 	void *rings;
579 
580 	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
581 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
582 	if (error != 0) {
583 		aprint_error_dev(sc->sc_dev,
584 		    "could not create desc DMA map\n");
585 		sc->sc_dma_ring_map = NULL;
586 		goto fail;
587 	}
588 
589 	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
590 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
591 	if (error != 0) {
592 		aprint_error_dev(sc->sc_dev,
593 		    "could not allocate DMA memory\n");
594 		goto fail;
595 	}
596 
597 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
598 	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
599 	if (error != 0) {
600 		aprint_error_dev(sc->sc_dev,
601 		    "could not map DMA memory\n");
602 		goto fail;
603 	}
604 
605 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
606 	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
607 	if (error != 0) {
608 		aprint_error_dev(sc->sc_dev,
609 		    "could not load desc DMA map\n");
610 		goto fail;
611 	}
612 
613 	/* give first AWGE_RX_RING_COUNT to the RX side */
614 	sc->sc_rxq.r_desc = rings;
615 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
616 
617 	/* and next rings to the TX side */
618 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
619 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
620 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
621 
622 	return 0;
623 
624 fail:
625 	dwc_gmac_free_dma_rings(sc);
626 	return error;
627 }
628 
629 static void
630 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
631 {
632 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
633 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
634 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
635 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
636 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
637 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
638 }
639 
640 static void
641 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
642 {
643 	struct dwc_gmac_rx_data *data;
644 	int i;
645 
646 	if (ring->r_desc == NULL)
647 		return;
648 
649 
650 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
651 		data = &ring->r_data[i];
652 
653 		if (data->rd_map != NULL) {
654 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
655 			    data->rd_map->dm_mapsize,
656 			    BUS_DMASYNC_POSTREAD);
658 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
659 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
660 		}
661 		if (data->rd_m != NULL)
662 			m_freem(data->rd_m);
663 	}
664 }
665 
666 static int
667 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
668 	struct dwc_gmac_tx_ring *ring)
669 {
670 	int i, error = 0;
671 
672 	ring->t_queued = 0;
673 	ring->t_cur = ring->t_next = 0;
674 
675 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
676 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
677 	    TX_DESC_OFFSET(0),
678 	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
679 	    BUS_DMASYNC_PREWRITE);
680 
681 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
682 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
683 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
684 		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
685 		    &ring->t_data[i].td_map);
686 		if (error != 0) {
687 			aprint_error_dev(sc->sc_dev,
688 			    "could not create TX DMA map #%d\n", i);
689 			ring->t_data[i].td_map = NULL;
690 			goto fail;
691 		}
692 		ring->t_desc[i].ddesc_next = htole32(
693 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
694 		    *TX_NEXT(i));
695 	}
696 
697 	return 0;
698 
699 fail:
700 	dwc_gmac_free_tx_ring(sc, ring);
701 	return error;
702 }
703 
704 static void
705 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
706 {
707 	/* 'end' is pointing one descriptor beyond the last we want to sync */
708 	if (end > start) {
709 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
710 		    TX_DESC_OFFSET(start),
711 		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
712 		    ops);
713 		return;
714 	}
715 	/* sync from 'start' to end of ring */
716 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
717 	    TX_DESC_OFFSET(start),
718 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
719 	    ops);
720 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
721 		/* sync from start of ring to 'end' */
722 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
723 		    TX_DESC_OFFSET(0),
724 		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
725 		    ops);
726 	}
727 }
728 
729 static void
730 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
731 	struct dwc_gmac_tx_ring *ring)
732 {
733 	int i;
734 
735 	mutex_enter(&ring->t_mtx);
736 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
737 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
738 
739 		if (data->td_m != NULL) {
740 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
741 			    0, data->td_active->dm_mapsize,
742 			    BUS_DMASYNC_POSTWRITE);
743 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
744 			m_freem(data->td_m);
745 			data->td_m = NULL;
746 		}
747 	}
748 
749 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
750 	    TX_DESC_OFFSET(0),
751 	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
752 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
753 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
754 	    sc->sc_txq.t_physaddr);
755 
756 	ring->t_queued = 0;
757 	ring->t_cur = ring->t_next = 0;
758 	mutex_exit(&ring->t_mtx);
759 }
760 
761 static void
762 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
763 	struct dwc_gmac_tx_ring *ring)
764 {
765 	int i;
766 
767 	/* unload the maps */
768 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
769 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
770 
771 		if (data->td_m != NULL) {
772 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
773 			    0, data->td_map->dm_mapsize,
774 			    BUS_DMASYNC_POSTWRITE);
775 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
776 			m_freem(data->td_m);
777 			data->td_m = NULL;
778 		}
779 	}
780 
781 	/* and actually free them */
782 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
783 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
784 
785 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
786 	}
787 }
788 
789 static void
790 dwc_gmac_miibus_statchg(struct ifnet *ifp)
791 {
792 	struct dwc_gmac_softc * const sc = ifp->if_softc;
793 	struct mii_data * const mii = &sc->sc_mii;
794 	uint32_t conf, flow;
795 
796 	/*
797 	 * Set MII or GMII interface based on the speed
798 	 * negotiated by the PHY.
799 	 */
800 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
801 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
802 	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
803 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
804 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
805 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
806 	    | AWIN_GMAC_MAC_CONF_ACS
807 	    | AWIN_GMAC_MAC_CONF_RXENABLE
808 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
809 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
810 	case IFM_10_T:
811 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
812 		break;
813 	case IFM_100_TX:
814 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
815 			AWIN_GMAC_MAC_CONF_MIISEL;
816 		break;
817 	case IFM_1000_T:
818 		break;
819 	}
820 	if (sc->sc_set_speed)
821 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
822 
823 	flow = 0;
824 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
825 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
826 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
827 	}
828 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
829 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
830 	}
831 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
832 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
833 	}
834 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
835 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
836 
837 #ifdef DWC_GMAC_DEBUG
838 	aprint_normal_dev(sc->sc_dev,
839 	    "setting MAC conf register: %08x\n", conf);
840 #endif
841 
842 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
843 	    AWIN_GMAC_MAC_CONF, conf);
844 }
845 
846 static int
847 dwc_gmac_init(struct ifnet *ifp)
848 {
849 	struct dwc_gmac_softc *sc = ifp->if_softc;
850 
851 	mutex_enter(sc->sc_lock);
852 	int ret = dwc_gmac_init_locked(ifp);
853 	mutex_exit(sc->sc_lock);
854 
855 	return ret;
856 }
857 
858 static int
859 dwc_gmac_init_locked(struct ifnet *ifp)
860 {
861 	struct dwc_gmac_softc *sc = ifp->if_softc;
862 	uint32_t ffilt;
863 
864 	if (ifp->if_flags & IFF_RUNNING)
865 		return 0;
866 
867 	dwc_gmac_stop_locked(ifp, 0);
868 
869 	/*
870 	 * Configure DMA burst/transfer mode and RX/TX priorities.
871 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
872 	 */
873 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
874 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
875 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
876 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
877 
878 	/*
879 	 * Set up address filter
880 	 */
881 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
882 	if (ifp->if_flags & IFF_PROMISC) {
883 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
884 	} else {
885 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
886 	}
887 	if (ifp->if_flags & IFF_BROADCAST) {
888 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
889 	} else {
890 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
891 	}
892 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
893 
894 	/*
895 	 * Set up multicast filter
896 	 */
897 	dwc_gmac_setmulti(sc);
898 
899 	/*
900 	 * Set up dma pointer for RX and TX ring
901 	 */
902 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
903 	    sc->sc_rxq.r_physaddr);
904 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
905 	    sc->sc_txq.t_physaddr);
906 
907 	/*
908 	 * Start RX/TX part
909 	 */
910 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
911 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
912 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
913 	}
914 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
915 
916 	sc->sc_stopping = false;
917 
918 	ifp->if_flags |= IFF_RUNNING;
919 	ifp->if_flags &= ~IFF_OACTIVE;
920 
921 	return 0;
922 }
923 
924 static void
925 dwc_gmac_start(struct ifnet *ifp)
926 {
927 	struct dwc_gmac_softc *sc = ifp->if_softc;
928 #ifdef DWCGMAC_MPSAFE
929 	KASSERT(if_is_mpsafe(ifp));
930 #endif
931 
932 	mutex_enter(sc->sc_lock);
933 	if (!sc->sc_stopping) {
934 		mutex_enter(&sc->sc_txq.t_mtx);
935 		dwc_gmac_start_locked(ifp);
936 		mutex_exit(&sc->sc_txq.t_mtx);
937 	}
938 	mutex_exit(sc->sc_lock);
939 }
940 
941 static void
942 dwc_gmac_start_locked(struct ifnet *ifp)
943 {
944 	struct dwc_gmac_softc *sc = ifp->if_softc;
945 	int old = sc->sc_txq.t_queued;
946 	int start = sc->sc_txq.t_cur;
947 	struct mbuf *m0;
948 
949 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
950 		return;
951 
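	/*
	 * Poll before dequeueing, so a packet that does not fit into
	 * the ring stays on the send queue for a later attempt.
	 */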
952 	for (;;) {
953 		IFQ_POLL(&ifp->if_snd, m0);
954 		if (m0 == NULL)
955 			break;
956 		if (dwc_gmac_queue(sc, m0) != 0) {
957 			ifp->if_flags |= IFF_OACTIVE;
958 			break;
959 		}
960 		IFQ_DEQUEUE(&ifp->if_snd, m0);
961 		bpf_mtap(ifp, m0, BPF_D_OUT);
962 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
963 			ifp->if_flags |= IFF_OACTIVE;
964 			break;
965 		}
966 	}
967 
968 	if (sc->sc_txq.t_queued != old) {
969 		/* packets have been queued, kick it off */
970 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
971 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
972 
973 #ifdef DWC_GMAC_DEBUG
974 		dwc_dump_status(sc);
975 #endif
976 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
977 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
978 	}
979 }
980 
981 static void
982 dwc_gmac_stop(struct ifnet *ifp, int disable)
983 {
984 	struct dwc_gmac_softc *sc = ifp->if_softc;
985 
986 	mutex_enter(sc->sc_lock);
987 	dwc_gmac_stop_locked(ifp, disable);
988 	mutex_exit(sc->sc_lock);
989 }
990 
991 static void
992 dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
993 {
994 	struct dwc_gmac_softc *sc = ifp->if_softc;
995 
996 	sc->sc_stopping = true;
997 
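	/* Stop both DMA engines, then flush the transmit FIFO. */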
998 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
999 	    AWIN_GMAC_DMA_OPMODE,
1000 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1001 		AWIN_GMAC_DMA_OPMODE)
1002 		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
1003 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1004 	    AWIN_GMAC_DMA_OPMODE,
1005 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1006 		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
1007 
1008 	mii_down(&sc->sc_mii);
1009 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
1010 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
1011 
1012 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1013 }
1014 
1015 /*
1016  * Add m0 to the TX ring
1017  */
1018 static int
1019 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
1020 {
1021 	struct dwc_gmac_dev_dmadesc *desc = NULL;
1022 	struct dwc_gmac_tx_data *data = NULL;
1023 	bus_dmamap_t map;
1024 	int error, i, first;
1025 
1026 #ifdef DWC_GMAC_DEBUG
1027 	aprint_normal_dev(sc->sc_dev,
1028 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
1029 #endif
1030 
1031 	first = sc->sc_txq.t_cur;
1032 	map = sc->sc_txq.t_data[first].td_map;
1033 
1034 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
1035 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1036 	if (error != 0) {
1037 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
1038 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
1039 		return error;
1040 	}
1041 
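	/*
	 * dm_nsegs is only known after a successful load, so the
	 * ring-space check has to come after bus_dmamap_load_mbuf().
	 */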
1042 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
1043 		bus_dmamap_unload(sc->sc_dmat, map);
1044 		return ENOBUFS;
1045 	}
1046 
1047 	for (i = 0; i < map->dm_nsegs; i++) {
1048 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
1049 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
1050 
1051 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
1052 
1053 #ifdef DWC_GMAC_DEBUG
1054 		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
1055 		    "len %lu\n", sc->sc_txq.t_cur,
1056 		    (unsigned long)map->dm_segs[i].ds_addr,
1057 		    (unsigned long)map->dm_segs[i].ds_len);
1058 #endif
1059 
1060 		sc->sc_descm->tx_init_flags(desc);
1061 		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
1062 
1063 		if (i == 0)
1064 			sc->sc_descm->tx_set_first_frag(desc);
1065 
1066 		/*
1067 		 * Defer passing ownership of the first descriptor
1068 		 * until we are done.
1069 		 */
1070 		if (i != 0)
1071 			sc->sc_descm->tx_set_owned_by_dev(desc);
1072 
1073 		sc->sc_txq.t_queued++;
1074 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
1075 	}
1076 
1077 	sc->sc_descm->tx_set_last_frag(desc);
1078 
1079 	data->td_m = m0;
1080 	data->td_active = map;
1081 
1082 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1083 	    BUS_DMASYNC_PREWRITE);
1084 
1085 	/* Pass first to device */
1086 	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
1090 
1091 	return 0;
1092 }
1093 
1094 /*
1095  * If the interface is up and running, only modify the receive
1096  * filter when setting promiscuous or debug mode.  Otherwise fall
1097  * through to ether_ioctl, which will reset the chip.
1098  */
1099 static int
1100 dwc_gmac_ifflags_cb(struct ethercom *ec)
1101 {
1102 	struct ifnet *ifp = &ec->ec_if;
1103 	struct dwc_gmac_softc *sc = ifp->if_softc;
1104 	int ret = 0;
1105 
1106 	mutex_enter(sc->sc_lock);
1107 	u_short change = ifp->if_flags ^ sc->sc_if_flags;
1108 	sc->sc_if_flags = ifp->if_flags;
1109 
1110 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
1111 		ret = ENETRESET;
1112 		goto out;
1113 	}
1114 	if ((change & IFF_PROMISC) != 0) {
1115 		dwc_gmac_setmulti(sc);
1116 	}
1117 out:
1118 	mutex_exit(sc->sc_lock);
1119 
1120 	return ret;
1121 }
1122 
1123 static int
1124 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1125 {
1126 	struct dwc_gmac_softc *sc = ifp->if_softc;
1127 	int error = 0;
1128 
1129 	int s = splnet();
1130 	error = ether_ioctl(ifp, cmd, data);
1131 
1132 #ifdef DWCGMAC_MPSAFE
1133 	splx(s);
1134 #endif
1135 
1136 	if (error == ENETRESET) {
1137 		error = 0;
1138 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1139 			;
1140 		else if (ifp->if_flags & IFF_RUNNING) {
1141 			/*
1142 			 * Multicast list has changed; set the hardware filter
1143 			 * accordingly.
1144 			 */
1145 			mutex_enter(sc->sc_lock);
1146 			dwc_gmac_setmulti(sc);
1147 			mutex_exit(sc->sc_lock);
1148 		}
1149 	}
1150 
1151 	/* Try to get things going again */
1152 	if (ifp->if_flags & IFF_UP)
1153 		dwc_gmac_start(ifp);
1154 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1155 
1156 #ifndef DWCGMAC_MPSAFE
1157 	splx(s);
1158 #endif
1159 
1160 	return error;
1161 }
1162 
1163 static void
1164 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
1165 {
1166 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1167 	struct dwc_gmac_tx_data *data;
1168 	struct dwc_gmac_dev_dmadesc *desc;
1169 	int i, nsegs;
1170 
1171 	mutex_enter(&sc->sc_txq.t_mtx);
1172 
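	/*
	 * Walk the ring from t_next.  t_queued counts descriptors in
	 * flight; it is decremented by a packet's segment count once
	 * the device has handed back that packet's last descriptor.
	 */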
1173 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
1174 #ifdef DWC_GMAC_DEBUG
1175 		aprint_normal_dev(sc->sc_dev,
1176 		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
1177 		    i, sc->sc_txq.t_queued);
1178 #endif
1179 
1180 		/*
1181 		 * i+1 does not need to be a valid descriptor;
1182 		 * it is just shorthand to sync a single
1183 		 * tx descriptor (i).
1184 		 */
1185 		dwc_gmac_txdesc_sync(sc, i, i+1,
1186 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1187 
1188 		desc = &sc->sc_txq.t_desc[i];
1189 		if (sc->sc_descm->tx_is_owned_by_dev(desc))
1190 			break;
1191 
1192 		data = &sc->sc_txq.t_data[i];
1193 		if (data->td_m == NULL)
1194 			continue;
1195 
1196 		if_statinc(ifp, if_opackets);
1197 		nsegs = data->td_active->dm_nsegs;
1198 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
1199 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1200 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
1201 
1202 #ifdef DWC_GMAC_DEBUG
1203 		aprint_normal_dev(sc->sc_dev,
1204 		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
1205 		    "freeing mbuf %p\n", i, data->td_m);
1206 #endif
1207 
1208 		m_freem(data->td_m);
1209 		data->td_m = NULL;
1210 
1211 		sc->sc_txq.t_queued -= nsegs;
1212 	}
1213 
1214 	sc->sc_txq.t_next = i;
1215 
1216 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
1217 		ifp->if_flags &= ~IFF_OACTIVE;
1218 	}
1219 	mutex_exit(&sc->sc_txq.t_mtx);
1220 }
1221 
1222 static void
1223 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
1224 {
1225 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1226 	struct dwc_gmac_dev_dmadesc *desc;
1227 	struct dwc_gmac_rx_data *data;
1228 	bus_addr_t physaddr;
1229 	struct mbuf *m, *mnew;
1230 	int i, len, error;
1231 
1232 	mutex_enter(&sc->sc_rxq.r_mtx);
1233 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
1234 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1235 		    RX_DESC_OFFSET(i), sizeof(*desc),
1236 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1237 		desc = &sc->sc_rxq.r_desc[i];
1238 		data = &sc->sc_rxq.r_data[i];
1239 
1240 		if (sc->sc_descm->rx_is_owned_by_dev(desc))
1241 			break;
1242 
1243 		if (sc->sc_descm->rx_has_error(desc)) {
1244 #ifdef DWC_GMAC_DEBUG
1245 			aprint_normal_dev(sc->sc_dev,
1246 			    "RX error: descriptor status %08x, skipping\n",
1247 			    le32toh(desc->ddesc_status0));
1248 #endif
1249 			if_statinc(ifp, if_ierrors);
1250 			goto skip;
1251 		}
1252 
1253 		len = sc->sc_descm->rx_get_len(desc);
1254 
1255 #ifdef DWC_GMAC_DEBUG
1256 		aprint_normal_dev(sc->sc_dev,
1257 		    "rx int: device is done with descriptor #%d, len: %d\n",
1258 		    i, len);
1259 #endif
1260 
1261 		/*
1262 		 * Try to get a new mbuf before passing this one
1263 		 * up; if that fails, drop the packet and reuse
1264 		 * the existing one.
1265 		 */
1266 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1267 		if (mnew == NULL) {
1268 			if_statinc(ifp, if_ierrors);
1269 			goto skip;
1270 		}
1271 		MCLGET(mnew, M_DONTWAIT);
1272 		if ((mnew->m_flags & M_EXT) == 0) {
1273 			m_freem(mnew);
1274 			if_statinc(ifp, if_ierrors);
1275 			goto skip;
1276 		}
1277 		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
1278 		if (mnew->m_len > AWGE_MAX_PACKET) {
1279 			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
1280 		}
1281 
1282 		/* unload old DMA map */
1283 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1284 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1285 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
1286 
1287 		/* and reload with new mbuf */
1288 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
1289 		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
1290 		if (error != 0) {
1291 			m_freem(mnew);
1292 			/* try to reload old mbuf */
1293 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
1294 			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1295 			if (error != 0) {
1296 				panic("%s: could not load old rx mbuf",
1297 				    device_xname(sc->sc_dev));
1298 			}
1299 			if_statinc(ifp, if_ierrors);
1300 			goto skip;
1301 		}
1302 		physaddr = data->rd_map->dm_segs[0].ds_addr;
1303 
1304 		/*
1305 		 * New mbuf loaded, update RX ring and continue
1306 		 */
1307 		m = data->rd_m;
1308 		data->rd_m = mnew;
1309 		desc->ddesc_data = htole32(physaddr);
1310 
1311 		/* finalize mbuf */
1312 		m->m_pkthdr.len = m->m_len = len;
1313 		m_set_rcvif(m, ifp);
1314 		m->m_flags |= M_HASFCS;
1315 
1316 		if_percpuq_enqueue(sc->sc_ipq, m);
1317 
1318 skip:
1319 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1320 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1321 
1322 		sc->sc_descm->rx_init_flags(desc);
1323 		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
1324 		sc->sc_descm->rx_set_owned_by_dev(desc);
1325 
1326 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1327 		    RX_DESC_OFFSET(i), sizeof(*desc),
1328 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1329 	}
1330 
1331 	/* update RX pointer */
1332 	sc->sc_rxq.r_cur = i;
1333 
1334 	mutex_exit(&sc->sc_rxq.r_mtx);
1335 }
1336 
1337 /*
1338  * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
1339  */
1340 static uint32_t
1341 bitrev32(uint32_t x)
1342 {
1343 	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
1344 	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
1345 	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
1346 	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
1347 
1348 	return (x >> 16) | (x << 16);
1349 }
1350 
1351 static void
1352 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
1353 {
1354 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
1355 	struct ether_multi *enm;
1356 	struct ether_multistep step;
1357 	struct ethercom *ec = &sc->sc_ec;
1358 	uint32_t hashes[2] = { 0, 0 };
1359 	uint32_t ffilt, h;
1360 	int mcnt;
1361 
1362 	KASSERT(mutex_owned(sc->sc_lock));
1363 
1364 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
1365 
1366 	if (ifp->if_flags & IFF_PROMISC) {
1367 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
1368 		goto special_filter;
1369 	}
1370 
1371 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
1372 
1373 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
1374 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
1375 
1376 	ETHER_LOCK(ec);
1377 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
1378 	ETHER_FIRST_MULTI(step, ec, enm);
1379 	mcnt = 0;
1380 	while (enm != NULL) {
1381 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1382 		    ETHER_ADDR_LEN) != 0) {
1383 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
1384 			ec->ec_flags |= ETHER_F_ALLMULTI;
1385 			ETHER_UNLOCK(ec);
1386 			goto special_filter;
1387 		}
1388 
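		/*
		 * Hash filter: bit-reverse the little-endian CRC32 of
		 * the address and take the upper 6 bits; bit 5 selects
		 * the HI/LO register, the low 5 bits the bit within it.
		 */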
1389 		h = bitrev32(
1390 			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
1391 		    ) >> 26;
1392 		hashes[h >> 5] |= (1 << (h & 0x1f));
1393 
1394 		mcnt++;
1395 		ETHER_NEXT_MULTI(step, enm);
1396 	}
1397 	ETHER_UNLOCK(ec);
1398 
1399 	if (mcnt)
1400 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
1401 	else
1402 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
1403 
1404 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
1405 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1406 	    hashes[0]);
1407 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1408 	    hashes[1]);
1409 	sc->sc_if_flags = ifp->if_flags;
1410 
1411 #ifdef DWC_GMAC_DEBUG
1412 	dwc_gmac_dump_ffilt(sc, ffilt);
1413 #endif
1414 	return;
1415 
1416 special_filter:
1417 #ifdef DWC_GMAC_DEBUG
1418 	dwc_gmac_dump_ffilt(sc, ffilt);
1419 #endif
1420 	/* no MAC hashes, ALLMULTI or PROMISC */
1421 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
1422 	    ffilt);
1423 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1424 	    0xffffffff);
1425 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1426 	    0xffffffff);
1427 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1428 }
1429 
1430 int
1431 dwc_gmac_intr(struct dwc_gmac_softc *sc)
1432 {
1433 	uint32_t status, dma_status;
1434 	int rv = 0;
1435 
1436 	if (sc->sc_stopping)
1437 		return 0;
1438 
1439 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
1440 	if (status & AWIN_GMAC_MII_IRQ) {
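		/*
		 * Dummy read; presumably this acknowledges the latched
		 * PHY interface (link change) interrupt.
		 */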
1441 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1442 		    AWIN_GMAC_MII_STATUS);
1443 		rv = 1;
1444 		mii_pollstat(&sc->sc_mii);
1445 	}
1446 
1447 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1448 	    AWIN_GMAC_DMA_STATUS);
1449 
1450 	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
1451 		rv = 1;
1452 
1453 	if (dma_status & GMAC_DMA_INT_TIE)
1454 		dwc_gmac_tx_intr(sc);
1455 
1456 	if (dma_status & GMAC_DMA_INT_RIE)
1457 		dwc_gmac_rx_intr(sc);
1458 
1459 	/*
1460 	 * Check error conditions
1461 	 */
1462 	if (dma_status & GMAC_DMA_INT_ERRORS) {
1463 		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
1464 #ifdef DWC_GMAC_DEBUG
1465 		dwc_dump_and_abort(sc, "interrupt error condition");
1466 #endif
1467 	}
1468 
1469 	rnd_add_uint32(&sc->rnd_source, dma_status);
1470 
1471 	/* ack interrupt */
1472 	if (dma_status)
1473 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1474 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1475 
1476 	/*
1477 	 * Get more packets
1478 	 */
1479 	if (rv)
1480 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
1481 
1482 	return rv;
1483 }
1484 
1485 static void
1486 dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1487 {
1488 
1489 	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
1490 }
1491 
1492 static int
1493 dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1494 {
1495 
1496 	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
1497 }
1498 
1499 static void
1500 dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1501 {
1502 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
1503 
1504 	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
1505 		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
1506 }
1507 
1508 static uint32_t
1509 dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
1510 {
1511 
1512 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
1513 }
1514 
1515 static void
1516 dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1517 {
1518 
1519 	desc->ddesc_status0 = 0;
1520 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1521 }
1522 
1523 static void
1524 dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1525 {
1526 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
1527 
1528 	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
1529 }
1530 
1531 static void
1532 dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1533 {
1534 	uint32_t cntl = le32toh(desc->ddesc_cntl1);
1535 
1536 	desc->ddesc_cntl1 = htole32(cntl |
1537 		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
1538 }
1539 
1540 static void
1541 dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1542 {
1543 
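	/*
	 * Note: the standard descriptor layout uses the same bit
	 * position for the RX and TX "second address chained" flags,
	 * so DDESC_CNTL_TXCHAIN is correct for RX descriptors too.
	 */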
1544 	desc->ddesc_status0 = 0;
1545 	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1546 }
1547 
1548 static int
1549 dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
1551 		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
1552 }
1553 
1554 static void
1555 dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1556 {
1557 	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);
1558 
1559 	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
1560 		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
1561 }
1562 
1563 static uint32_t
1564 dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
1565 {
1566 
1567 	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
1568 }
1569 
1570 static void
1571 dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1572 {
1573 
1574 	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
1575 	desc->ddesc_cntl1 = 0;
1576 }
1577 
1578 static void
1579 dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1580 {
1581 	uint32_t tdes0 = le32toh(desc->ddesc_status0);
1582 
1583 	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
1584 }
1585 
1586 static void
1587 dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1588 {
1589 	uint32_t tdes0 = le32toh(desc->ddesc_status0);
1590 
1591 	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
1592 }
1593 
1594 static void
1595 dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1596 {
1597 
1598 	desc->ddesc_status0 = 0;
1599 	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
1600 }
1601 
1602 static int
1603 dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
1604 {
1605 
1606 	return !!(le32toh(desc->ddesc_status0) &
1607 		(DDESC_RDES0_ES | DDESC_RDES0_LE));
1608 }
1609 
1610 #ifdef DWC_GMAC_DEBUG
1611 static void
1612 dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1613 {
1614 	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1615 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1616 	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1617 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1618 	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1619 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1620 	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1621 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1622 	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1623 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1624 	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1625 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1626 	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1627 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1628 	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1629 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1630 	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1631 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1632 	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1633 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1634 	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1635 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1636 	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1637 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1638 }
1639 
1640 static void
1641 dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1642 {
1643 	int i;
1644 
1645 	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1646 	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1647 	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1648 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1649 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1650 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1651 		    "data: %08x next: %08x\n",
1652 		    i, sc->sc_txq.t_physaddr +
1653 			i*sizeof(struct dwc_gmac_dev_dmadesc),
1654 		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1655 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1656 	}
1657 }
1658 
1659 static void
1660 dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1661 {
1662 	int i;
1663 
1664 	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1665 	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1666 	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1667 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1668 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1669 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1670 		    "data: %08x next: %08x\n",
1671 		    i, sc->sc_rxq.r_physaddr +
1672 			i*sizeof(struct dwc_gmac_dev_dmadesc),
1673 		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1674 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1675 	}
1676 }
1677 
1678 static void
1679 dwc_dump_status(struct dwc_gmac_softc *sc)
1680 {
1681 	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1682 	     AWIN_GMAC_MAC_INTR);
1683 	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1684 	     AWIN_GMAC_DMA_STATUS);
1685 	char buf[200];
1686 
1687 	/* print interrupt state */
1688 	snprintb(buf, sizeof(buf), "\177\20"
1689 	    "b\x10""NI\0"
1690 	    "b\x0f""AI\0"
1691 	    "b\x0e""ER\0"
1692 	    "b\x0d""FB\0"
1693 	    "b\x0a""ET\0"
1694 	    "b\x09""RW\0"
1695 	    "b\x08""RS\0"
1696 	    "b\x07""RU\0"
1697 	    "b\x06""RI\0"
1698 	    "b\x05""UN\0"
1699 	    "b\x04""OV\0"
1700 	    "b\x03""TJ\0"
1701 	    "b\x02""TU\0"
1702 	    "b\x01""TS\0"
1703 	    "b\x00""TI\0"
1704 	    "\0", dma_status);
1705 	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1706 	    status, buf);
1707 }
1708 
1709 static void
1710 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1711 {
1712 	dwc_dump_status(sc);
1713 	dwc_gmac_dump_ffilt(sc,
1714 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1715 	dwc_gmac_dump_dma(sc);
1716 	dwc_gmac_dump_tx_desc(sc);
1717 	dwc_gmac_dump_rx_desc(sc);
1718 
1719 	panic("%s", msg);
1720 }
1721 
1722 static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1723 {
1724 	char buf[200];
1725 
1726 	/* print filter setup */
1727 	snprintb(buf, sizeof(buf), "\177\20"
1728 	    "b\x1f""RA\0"
1729 	    "b\x0a""HPF\0"
1730 	    "b\x09""SAF\0"
1731 	    "b\x08""SAIF\0"
1732 	    "b\x05""DBF\0"
1733 	    "b\x04""PM\0"
1734 	    "b\x03""DAIF\0"
1735 	    "b\x02""HMC\0"
1736 	    "b\x01""HUC\0"
1737 	    "b\x00""PR\0"
1738 	    "\0", ffilt);
1739 	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1740 }
1741 #endif
1742