xref: /netbsd-src/sys/dev/ic/dwc_gmac.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* $NetBSD: dwc_gmac.c,v 1.51 2018/06/30 16:27:48 jmcneill Exp $ */
2 
3 /*-
4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * This driver supports the Synopsys DesignWare GMAC core, as found
34  * on Allwinner A20 cores and others.
35  *
36  * Real documentation does not seem to be available; the marketing product
37  * documents can be found here:
38  *
39  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
40  */
41 
42 #include <sys/cdefs.h>
43 
44 __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.51 2018/06/30 16:27:48 jmcneill Exp $");
45 
46 /* #define	DWC_GMAC_DEBUG	1 */
47 
48 #ifdef _KERNEL_OPT
49 #include "opt_inet.h"
50 #include "opt_net_mpsafe.h"
51 #endif
52 
53 #include <sys/param.h>
54 #include <sys/bus.h>
55 #include <sys/device.h>
56 #include <sys/intr.h>
57 #include <sys/systm.h>
58 #include <sys/sockio.h>
59 #include <sys/cprng.h>
60 
61 #include <net/if.h>
62 #include <net/if_ether.h>
63 #include <net/if_media.h>
64 #include <net/bpf.h>
65 #ifdef INET
66 #include <netinet/if_inarp.h>
67 #endif
68 
69 #include <dev/mii/miivar.h>
70 
71 #include <dev/ic/dwc_gmac_reg.h>
72 #include <dev/ic/dwc_gmac_var.h>
73 
74 static int dwc_gmac_miibus_read_reg(device_t, int, int);
75 static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
76 static void dwc_gmac_miibus_statchg(struct ifnet *);
77 
78 static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
79 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
80 			 uint8_t enaddr[ETHER_ADDR_LEN]);
81 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
82 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
83 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
84 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
85 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
86 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
87 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
88 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
89 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
90 static int dwc_gmac_init(struct ifnet *ifp);
91 static int dwc_gmac_init_locked(struct ifnet *ifp);
92 static void dwc_gmac_stop(struct ifnet *ifp, int disable);
93 static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
94 static void dwc_gmac_start(struct ifnet *ifp);
95 static void dwc_gmac_start_locked(struct ifnet *ifp);
96 static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
97 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
98 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
99 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
100 static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
101 static int dwc_gmac_ifflags_cb(struct ethercom *);
102 static uint32_t	bitrev32(uint32_t x);
103 
104 #define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
105 				    *sizeof(struct dwc_gmac_dev_dmadesc))
106 #define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))
107 
108 #define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
109 #define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
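
/*
 * RX and TX descriptors share one array: the first AWGE_RX_RING_COUNT
 * entries belong to the RX ring, the following AWGE_TX_RING_COUNT
 * entries to the TX ring.  The *_NEXT() wrap macros rely on both ring
 * counts being powers of two.
 */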
110 
111 
112 
113 #define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
114 				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
115 				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)
116 
117 #define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
118 				GMAC_DMA_INT_FBE|	\
119 				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
120 				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
121 				GMAC_DMA_INT_TJE)
122 
123 #define	AWIN_DEF_MAC_INTRMASK	\
124 	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
125 	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
126 
127 #ifdef DWC_GMAC_DEBUG
128 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
129 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
130 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
131 static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
132 static void dwc_dump_status(struct dwc_gmac_softc *sc);
133 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
134 #endif
135 
136 #ifdef NET_MPSAFE
137 #define DWCGMAC_MPSAFE	1
138 #endif
139 
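/*
 * Note: this file implements only the chip-independent core.  A bus
 * front-end (for example awge(4) on Allwinner SoCs) is expected to map
 * the registers, fill in sc_dev, sc_bst, sc_bsh and sc_dmat (and
 * optionally sc_set_speed), hook its interrupt handler up to
 * dwc_gmac_intr(), and then call dwc_gmac_attach() with the MII clock
 * selection to use for the MDIO interface.
 */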
140 int
141 dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
142 {
143 	uint8_t enaddr[ETHER_ADDR_LEN];
144 	uint32_t maclo, machi;
145 	struct mii_data * const mii = &sc->sc_mii;
146 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
147 	prop_dictionary_t dict;
148 	int rv;
149 
150 	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
151 	sc->sc_mii_clk = mii_clk & 7;
152 
153 	dict = device_properties(sc->sc_dev);
154 	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
155 	if (ea != NULL) {
156 		/*
157 		 * If the MAC address is overridden by a device property,
158 		 * use that.
159 		 */
160 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
161 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
162 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
163 	} else {
164 		/*
165 		 * If we did not get an externally configured address,
166 		 * try to read one from the current filter setup
167 		 * before resetting the chip.
168 		 */
169 		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
170 		    AWIN_GMAC_MAC_ADDR0LO);
171 		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
172 		    AWIN_GMAC_MAC_ADDR0HI);
173 
174 		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
175 			/* fake, locally administered MAC address */
176 			maclo = 0x00f2 | (cprng_strong32() << 16);
177 			machi = cprng_strong32();
178 		}
179 
180 		enaddr[0] = maclo & 0x0ff;
181 		enaddr[1] = (maclo >> 8) & 0x0ff;
182 		enaddr[2] = (maclo >> 16) & 0x0ff;
183 		enaddr[3] = (maclo >> 24) & 0x0ff;
184 		enaddr[4] = machi & 0x0ff;
185 		enaddr[5] = (machi >> 8) & 0x0ff;
186 	}
187 
188 	/*
189 	 * Init chip and do initial setup
190 	 */
191 	if (dwc_gmac_reset(sc) != 0)
192 		return ENXIO;	/* not much to clean up, we haven't attached yet */
193 	dwc_gmac_write_hwaddr(sc, enaddr);
194 	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
195 	    ether_sprintf(enaddr));
196 
197 	/*
198 	 * Allocate Tx and Rx rings
199 	 */
200 	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
201 		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
202 		goto fail;
203 	}
204 
205 	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
206 		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
207 		goto fail;
208 	}
209 
210 	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
211 		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
212 		goto fail;
213 	}
214 
215 	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
216 	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
217 	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
218 
219 	/*
220 	 * Prepare interface data
221 	 */
222 	ifp->if_softc = sc;
223 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
224 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
225 #ifdef DWCGMAC_MPSAFE
226 	ifp->if_extflags = IFEF_MPSAFE;
227 #endif
228 	ifp->if_ioctl = dwc_gmac_ioctl;
229 	ifp->if_start = dwc_gmac_start;
230 	ifp->if_init = dwc_gmac_init;
231 	ifp->if_stop = dwc_gmac_stop;
232 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
233 	IFQ_SET_READY(&ifp->if_snd);
234 
235 	/*
236 	 * Attach MII subdevices
237 	 */
238 	sc->sc_ec.ec_mii = &sc->sc_mii;
239 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
240 	mii->mii_ifp = ifp;
241 	mii->mii_readreg = dwc_gmac_miibus_read_reg;
242 	mii->mii_writereg = dwc_gmac_miibus_write_reg;
243 	mii->mii_statchg = dwc_gmac_miibus_statchg;
244 	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
245 	    MIIF_DOPAUSE);
246 
247 	if (LIST_EMPTY(&mii->mii_phys)) {
248 		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
249 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
250 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
251 	} else {
252 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
253 	}
254 
255 	/*
256 	 * We can support 802.1Q VLAN-sized frames.
257 	 */
258 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
259 
260 	/*
261 	 * Ready, attach interface
262 	 */
264 	rv = if_initialize(ifp);
265 	if (rv != 0)
266 		goto fail_2;
267 	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
268 	if_deferred_start_init(ifp, NULL);
269 	ether_ifattach(ifp, enaddr);
270 	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
271 	if_register(ifp);
272 
273 	/*
274 	 * Enable interrupts
275 	 */
276 	mutex_enter(sc->sc_lock);
277 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
278 	    AWIN_DEF_MAC_INTRMASK);
279 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
280 	    GMAC_DEF_DMA_INT_MASK);
281 	mutex_exit(sc->sc_lock);
282 
283 	return 0;
284 
285 fail_2:
286 	ifmedia_removeall(&mii->mii_media);
287 	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
288 	mutex_destroy(&sc->sc_txq.t_mtx);
289 	mutex_destroy(&sc->sc_rxq.r_mtx);
290 	mutex_obj_free(sc->sc_lock);
291 fail:
292 	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
293 	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
294 	dwc_gmac_free_dma_rings(sc);
295 	mutex_destroy(&sc->sc_mdio_lock);
296 
297 	return ENXIO;
298 }
299 
300 
301 
302 static int
303 dwc_gmac_reset(struct dwc_gmac_softc *sc)
304 {
305 	size_t cnt;
306 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
307 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
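	/* Wait up to ~30ms (3000 * 10us) for the soft reset bit to clear. */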
308 	for (cnt = 0; cnt < 3000; cnt++) {
309 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
310 		    & GMAC_BUSMODE_RESET) == 0)
311 			return 0;
312 		delay(10);
313 	}
314 
315 	aprint_error_dev(sc->sc_dev, "reset timed out\n");
316 	return EIO;
317 }
318 
319 static void
320 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
321     uint8_t enaddr[ETHER_ADDR_LEN])
322 {
323 	uint32_t hi, lo;
324 
325 	hi = enaddr[4] | (enaddr[5] << 8);
326 	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
327 	    | (enaddr[3] << 24);
328 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
329 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
330 }
331 
332 static int
333 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
334 {
335 	struct dwc_gmac_softc * const sc = device_private(self);
336 	uint16_t mii;
337 	size_t cnt;
338 	int rv = 0;
339 
340 	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
341 	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
342 	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
343 	    | GMAC_MII_BUSY;
344 
345 	mutex_enter(&sc->sc_mdio_lock);
346 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
347 
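	/* Poll up to ~10ms (1000 * 10us) for the MDIO busy bit to clear. */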
348 	for (cnt = 0; cnt < 1000; cnt++) {
349 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
350 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
351 			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
352 			    AWIN_GMAC_MAC_MIIDATA);
353 			break;
354 		}
355 		delay(10);
356 	}
357 
358 	mutex_exit(&sc->sc_mdio_lock);
359 
360 	return rv;
361 }
362 
363 static void
364 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
365 {
366 	struct dwc_gmac_softc * const sc = device_private(self);
367 	uint16_t mii;
368 	size_t cnt;
369 
370 	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
371 	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
372 	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
373 	    | GMAC_MII_BUSY | GMAC_MII_WRITE;
374 
375 	mutex_enter(&sc->sc_mdio_lock);
376 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
377 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
378 
379 	for (cnt = 0; cnt < 1000; cnt++) {
380 		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
381 		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
382 			break;
383 		delay(10);
384 	}
385 
386 	mutex_exit(&sc->sc_mdio_lock);
387 }
388 
389 static int
390 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
391 	struct dwc_gmac_rx_ring *ring)
392 {
393 	struct dwc_gmac_rx_data *data;
394 	bus_addr_t physaddr;
395 	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
396 	int error, i, next;
397 
398 	ring->r_cur = ring->r_next = 0;
399 	memset(ring->r_desc, 0, descsize);
400 
401 	/*
402 	 * Pre-allocate Rx buffers and populate Rx ring.
403 	 */
404 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
405 		struct dwc_gmac_dev_dmadesc *desc;
406 
407 		data = &sc->sc_rxq.r_data[i];
408 
409 		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
410 		if (data->rd_m == NULL) {
411 			aprint_error_dev(sc->sc_dev,
412 			    "could not allocate rx mbuf #%d\n", i);
413 			error = ENOMEM;
414 			goto fail;
415 		}
416 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
417 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
418 		if (error != 0) {
419 			aprint_error_dev(sc->sc_dev,
420 			    "could not create DMA map\n");
421 			data->rd_map = NULL;
422 			goto fail;
423 		}
424 		MCLGET(data->rd_m, M_DONTWAIT);
425 		if (!(data->rd_m->m_flags & M_EXT)) {
426 			aprint_error_dev(sc->sc_dev,
427 			    "could not allocate mbuf cluster #%d\n", i);
428 			error = ENOMEM;
429 			goto fail;
430 		}
431 
432 		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
433 		    mtod(data->rd_m, void *), MCLBYTES, NULL,
434 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
435 		if (error != 0) {
436 			aprint_error_dev(sc->sc_dev,
437 			    "could not load rx buf DMA map #%d\n", i);
438 			goto fail;
439 		}
440 		physaddr = data->rd_map->dm_segs[0].ds_addr;
441 
442 		desc = &sc->sc_rxq.r_desc[i];
443 		desc->ddesc_data = htole32(physaddr);
444 		next = RX_NEXT(i);
445 		desc->ddesc_next = htole32(ring->r_physaddr
446 		    + next * sizeof(*desc));
447 		desc->ddesc_cntl = htole32(
448 		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
449 		    DDESC_CNTL_RXCHAIN);
450 		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
451 	}
452 
453 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
454 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
455 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
456 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
457 	    ring->r_physaddr);
458 
459 	return 0;
460 
461 fail:
462 	dwc_gmac_free_rx_ring(sc, ring);
463 	return error;
464 }
465 
466 static void
467 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
468 	struct dwc_gmac_rx_ring *ring)
469 {
470 	struct dwc_gmac_dev_dmadesc *desc;
471 	int i;
472 
473 	mutex_enter(&ring->r_mtx);
474 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
475 		desc = &sc->sc_rxq.r_desc[i];
476 		desc->ddesc_cntl = htole32(
477 		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
478 		    DDESC_CNTL_RXCHAIN);
479 		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
480 	}
481 
482 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
483 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
484 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
485 
486 	ring->r_cur = ring->r_next = 0;
487 	/* reset DMA address to start of ring */
488 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
489 	    sc->sc_rxq.r_physaddr);
490 	mutex_exit(&ring->r_mtx);
491 }
492 
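/*
 * Allocate the RX and TX descriptor rings as a single physically
 * contiguous chunk of DMA memory, so one DMA map (sc_dma_ring_map)
 * covers both: RX descriptors first, TX descriptors after them.
 */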
493 static int
494 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
495 {
496 	const size_t descsize = AWGE_TOTAL_RING_COUNT *
497 		sizeof(struct dwc_gmac_dev_dmadesc);
498 	int error, nsegs;
499 	void *rings;
500 
501 	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
502 	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
503 	if (error != 0) {
504 		aprint_error_dev(sc->sc_dev,
505 		    "could not create desc DMA map\n");
506 		sc->sc_dma_ring_map = NULL;
507 		goto fail;
508 	}
509 
510 	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
511 	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
512 	if (error != 0) {
513 		aprint_error_dev(sc->sc_dev,
514 		    "could not allocate DMA memory\n");
515 		goto fail;
516 	}
517 
518 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
519 	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
520 	if (error != 0) {
521 		aprint_error_dev(sc->sc_dev,
522 		    "could not map DMA memory\n");
523 		goto fail;
524 	}
525 
526 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
527 	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
528 	if (error != 0) {
529 		aprint_error_dev(sc->sc_dev,
530 		    "could not load desc DMA map\n");
531 		goto fail;
532 	}
533 
534 	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
535 	sc->sc_rxq.r_desc = rings;
536 	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
537 
538 	/* and the remaining descriptors to the TX side */
539 	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
540 	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
541 	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
542 
543 	return 0;
544 
545 fail:
546 	dwc_gmac_free_dma_rings(sc);
547 	return error;
548 }
549 
550 static void
551 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
552 {
553 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
554 	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
555 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
556 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
557 	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
558 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
559 }
560 
561 static void
562 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
563 {
564 	struct dwc_gmac_rx_data *data;
565 	int i;
566 
567 	if (ring->r_desc == NULL)
568 		return;
569 
570 
571 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
572 		data = &ring->r_data[i];
573 
574 		if (data->rd_map != NULL) {
575 			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
576 			    data->rd_map->dm_mapsize,	/* the mbuf map, not the ring */
578 			    BUS_DMASYNC_POSTREAD);
579 			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
580 			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
581 		}
582 		if (data->rd_m != NULL)
583 			m_freem(data->rd_m);
584 	}
585 }
586 
587 static int
588 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
589 	struct dwc_gmac_tx_ring *ring)
590 {
591 	int i, error = 0;
592 
593 	ring->t_queued = 0;
594 	ring->t_cur = ring->t_next = 0;
595 
596 	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
597 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
598 	    TX_DESC_OFFSET(0),
599 	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
600 	    BUS_DMASYNC_POSTWRITE);
601 
602 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
603 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
604 		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
605 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
606 		    &ring->t_data[i].td_map);
607 		if (error != 0) {
608 			aprint_error_dev(sc->sc_dev,
609 			    "could not create TX DMA map #%d\n", i);
610 			ring->t_data[i].td_map = NULL;
611 			goto fail;
612 		}
613 		ring->t_desc[i].ddesc_next = htole32(
614 		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
615 		    *TX_NEXT(i));
616 	}
617 
618 	return 0;
619 
620 fail:
621 	dwc_gmac_free_tx_ring(sc, ring);
622 	return error;
623 }
624 
625 static void
626 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
627 {
628 	/* 'end' points one descriptor beyond the last one we want to sync */
629 	if (end > start) {
630 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
631 		    TX_DESC_OFFSET(start),
632 		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
633 		    ops);
634 		return;
635 	}
636 	/* sync from 'start' to end of ring */
637 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
638 	    TX_DESC_OFFSET(start),
639 	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
640 	    ops);
641 	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
642 		/* sync from start of ring to 'end' */
643 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
644 		    TX_DESC_OFFSET(0),
645 		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
646 		    ops);
647 	}
648 }
649 
650 static void
651 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
652 	struct dwc_gmac_tx_ring *ring)
653 {
654 	int i;
655 
656 	mutex_enter(&ring->t_mtx);
657 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
658 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
659 
660 		if (data->td_m != NULL) {
661 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
662 			    0, data->td_active->dm_mapsize,
663 			    BUS_DMASYNC_POSTWRITE);
664 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
665 			m_freem(data->td_m);
666 			data->td_m = NULL;
667 		}
668 	}
669 
670 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
671 	    TX_DESC_OFFSET(0),
672 	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
673 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
674 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
675 	    sc->sc_txq.t_physaddr);
676 
677 	ring->t_queued = 0;
678 	ring->t_cur = ring->t_next = 0;
679 	mutex_exit(&ring->t_mtx);
680 }
681 
682 static void
683 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
684 	struct dwc_gmac_tx_ring *ring)
685 {
686 	int i;
687 
688 	/* unload the maps */
689 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
690 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
691 
692 		if (data->td_m != NULL) {
693 			bus_dmamap_sync(sc->sc_dmat, data->td_active,
694 			    0, data->td_active->dm_mapsize,
695 			    BUS_DMASYNC_POSTWRITE);
696 			bus_dmamap_unload(sc->sc_dmat, data->td_active);
697 			m_freem(data->td_m);
698 			data->td_m = NULL;
699 		}
700 	}
701 
702 	/* and actually free them */
703 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
704 		struct dwc_gmac_tx_data *data = &ring->t_data[i];
705 
706 		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
707 	}
708 }
709 
710 static void
711 dwc_gmac_miibus_statchg(struct ifnet *ifp)
712 {
713 	struct dwc_gmac_softc * const sc = ifp->if_softc;
714 	struct mii_data * const mii = &sc->sc_mii;
715 	uint32_t conf, flow;
716 
717 	/*
718 	 * Set MII or GMII interface based on the speed
719 	 * negotiated by the PHY.
720 	 */
721 	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
722 	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
723 	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
724 	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
725 	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
726 	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
727 	    | AWIN_GMAC_MAC_CONF_ACS
728 	    | AWIN_GMAC_MAC_CONF_RXENABLE
729 	    | AWIN_GMAC_MAC_CONF_TXENABLE;
730 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
731 	case IFM_10_T:
732 		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
733 		break;
734 	case IFM_100_TX:
735 		conf |= AWIN_GMAC_MAC_CONF_FES100 |
736 			AWIN_GMAC_MAC_CONF_MIISEL;
737 		break;
738 	case IFM_1000_T:
739 		break;
740 	}
741 	if (sc->sc_set_speed)
742 		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
743 
744 	flow = 0;
745 	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
746 		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
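		/* pause time (0x200) advertised in transmitted PAUSE frames */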
747 		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
748 	}
749 	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
750 		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
751 	}
752 	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
753 		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
754 	}
755 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
756 	    AWIN_GMAC_MAC_FLOWCTRL, flow);
757 
758 #ifdef DWC_GMAC_DEBUG
759 	aprint_normal_dev(sc->sc_dev,
760 	    "setting MAC conf register: %08x\n", conf);
761 #endif
762 
763 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
764 	    AWIN_GMAC_MAC_CONF, conf);
765 }
766 
767 static int
768 dwc_gmac_init(struct ifnet *ifp)
769 {
770 	struct dwc_gmac_softc *sc = ifp->if_softc;
771 
772 	mutex_enter(sc->sc_lock);
773 	int ret = dwc_gmac_init_locked(ifp);
774 	mutex_exit(sc->sc_lock);
775 
776 	return ret;
777 }
778 
779 static int
780 dwc_gmac_init_locked(struct ifnet *ifp)
781 {
782 	struct dwc_gmac_softc *sc = ifp->if_softc;
783 	uint32_t ffilt;
784 
785 	if (ifp->if_flags & IFF_RUNNING)
786 		return 0;
787 
788 	dwc_gmac_stop_locked(ifp, 0);
789 
790 	/*
791 	 * Configure DMA burst/transfer mode and RX/TX priorities.
792 	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
793 	 */
794 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
795 	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
796 	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
797 	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
798 
799 	/*
800 	 * Set up address filter
801 	 */
802 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
803 	if (ifp->if_flags & IFF_PROMISC) {
804 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
805 	} else {
806 		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
807 	}
808 	if (ifp->if_flags & IFF_BROADCAST) {
809 		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
810 	} else {
811 		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
812 	}
813 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
814 
815 	/*
816 	 * Set up multicast filter
817 	 */
818 	dwc_gmac_setmulti(sc);
819 
820 	/*
821 	 * Set up dma pointer for RX and TX ring
822 	 */
823 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
824 	    sc->sc_rxq.r_physaddr);
825 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
826 	    sc->sc_txq.t_physaddr);
827 
828 	/*
829 	 * Start the RX/TX DMA engines
830 	 */
831 	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
832 	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
833 		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
834 	}
835 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
836 
837 	sc->sc_stopping = false;
838 
839 	ifp->if_flags |= IFF_RUNNING;
840 	ifp->if_flags &= ~IFF_OACTIVE;
841 
842 	return 0;
843 }
844 
845 static void
846 dwc_gmac_start(struct ifnet *ifp)
847 {
848 	struct dwc_gmac_softc *sc = ifp->if_softc;
849 #ifdef DWCGMAC_MPSAFE
850 	KASSERT(if_is_mpsafe(ifp));
851 #endif
852 
853 	mutex_enter(sc->sc_lock);
854 	if (!sc->sc_stopping) {
855 		mutex_enter(&sc->sc_txq.t_mtx);
856 		dwc_gmac_start_locked(ifp);
857 		mutex_exit(&sc->sc_txq.t_mtx);
858 	}
859 	mutex_exit(sc->sc_lock);
860 }
861 
862 static void
863 dwc_gmac_start_locked(struct ifnet *ifp)
864 {
865 	struct dwc_gmac_softc *sc = ifp->if_softc;
866 	int old = sc->sc_txq.t_queued;
867 	int start = sc->sc_txq.t_cur;
868 	struct mbuf *m0;
869 
870 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
871 		return;
872 
873 	for (;;) {
874 		IFQ_POLL(&ifp->if_snd, m0);
875 		if (m0 == NULL)
876 			break;
877 		if (dwc_gmac_queue(sc, m0) != 0) {
878 			ifp->if_flags |= IFF_OACTIVE;
879 			break;
880 		}
881 		IFQ_DEQUEUE(&ifp->if_snd, m0);
882 		bpf_mtap(ifp, m0, BPF_D_OUT);
883 		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
884 			ifp->if_flags |= IFF_OACTIVE;
885 			break;
886 		}
887 	}
888 
889 	if (sc->sc_txq.t_queued != old) {
890 		/* packets have been queued, kick it off */
891 		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
892 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
893 
894 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
895 		    AWIN_GMAC_DMA_TXPOLL, ~0U);
896 #ifdef DWC_GMAC_DEBUG
897 		dwc_dump_status(sc);
898 #endif
899 	}
900 }
901 
902 static void
903 dwc_gmac_stop(struct ifnet *ifp, int disable)
904 {
905 	struct dwc_gmac_softc *sc = ifp->if_softc;
906 
907 	mutex_enter(sc->sc_lock);
908 	dwc_gmac_stop_locked(ifp, disable);
909 	mutex_exit(sc->sc_lock);
910 }
911 
912 static void
913 dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
914 {
915 	struct dwc_gmac_softc *sc = ifp->if_softc;
916 
917 	sc->sc_stopping = true;
918 
919 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
920 	    AWIN_GMAC_DMA_OPMODE,
921 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
922 	        AWIN_GMAC_DMA_OPMODE)
923 		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
924 	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
925 	    AWIN_GMAC_DMA_OPMODE,
926 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
927 	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
928 
929 	mii_down(&sc->sc_mii);
930 	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
931 	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
932 
933 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
934 }
935 
936 /*
937  * Add m0 to the TX ring
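 *
 * The mbuf chain is loaded into a single bus_dma map and every DMA
 * segment gets its own chained TX descriptor.  Ownership of the first
 * descriptor is handed to the hardware last (at the end of this
 * function) so the chip never sees a partially built chain.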
938  */
939 static int
940 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
941 {
942 	struct dwc_gmac_dev_dmadesc *desc = NULL;
943 	struct dwc_gmac_tx_data *data = NULL;
944 	bus_dmamap_t map;
945 	uint32_t flags, len, status;
946 	int error, i, first;
947 
948 #ifdef DWC_GMAC_DEBUG
949 	aprint_normal_dev(sc->sc_dev,
950 	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
951 #endif
952 
953 	first = sc->sc_txq.t_cur;
954 	map = sc->sc_txq.t_data[first].td_map;
955 
956 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
957 	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
958 	if (error != 0) {
959 		aprint_error_dev(sc->sc_dev, "could not map mbuf "
960 		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
961 		return error;
962 	}
963 
964 	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
965 		bus_dmamap_unload(sc->sc_dmat, map);
966 		return ENOBUFS;
967 	}
968 
969 	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
970 	status = 0;
971 	for (i = 0; i < map->dm_nsegs; i++) {
972 		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
973 		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
974 
975 		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
976 		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
977 
978 #ifdef DWC_GMAC_DEBUG
979 		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
980 		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
981 		    (unsigned long)map->dm_segs[i].ds_addr,
982 		    (unsigned long)map->dm_segs[i].ds_len,
983 		    flags, len);
984 #endif
985 
986 		desc->ddesc_cntl = htole32(len|flags);
987 		flags &= ~DDESC_CNTL_TXFIRST;
988 
989 		/*
990 		 * Defer passing ownership of the first descriptor
991 		 * until we are done.
992 		 */
993 		desc->ddesc_status = htole32(status);
994 		status |= DDESC_STATUS_OWNEDBYDEV;
995 
996 		sc->sc_txq.t_queued++;
997 		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
998 	}
999 
1000 	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);
1001 
1002 	data->td_m = m0;
1003 	data->td_active = map;
1004 
1005 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1006 	    BUS_DMASYNC_PREWRITE);
1007 
1008 	/* Pass first to device */
1009 	sc->sc_txq.t_desc[first].ddesc_status =
1010 	    htole32(DDESC_STATUS_OWNEDBYDEV);
1011 
1012 	return 0;
1013 }
1014 
1015 /*
1016  * If the interface is up and running, only modify the receive
1017  * filter when setting promiscuous or debug mode.  Otherwise return
1018  * ENETRESET so the caller re-initializes the chip.
1019  */
1020 static int
1021 dwc_gmac_ifflags_cb(struct ethercom *ec)
1022 {
1023 	struct ifnet *ifp = &ec->ec_if;
1024 	struct dwc_gmac_softc *sc = ifp->if_softc;
1025 	int ret = 0;
1026 
1027 	mutex_enter(sc->sc_lock);
1028 	int change = ifp->if_flags ^ sc->sc_if_flags;
1029 	sc->sc_if_flags = ifp->if_flags;
1030 
1031 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
1032 		ret = ENETRESET;
1033 		goto out;
1034 	}
1035 	if ((change & IFF_PROMISC) != 0) {
1036 		dwc_gmac_setmulti(sc);
1037 	}
1038 out:
1039 	mutex_exit(sc->sc_lock);
1040 
1041 	return ret;
1042 }
1043 
1044 static int
1045 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1046 {
1047 	struct dwc_gmac_softc *sc = ifp->if_softc;
1048 	int error = 0;
1049 
1050 	int s = splnet();
1051 	error = ether_ioctl(ifp, cmd, data);
1052 
1053 #ifdef DWCGMAC_MPSAFE
1054 	splx(s);
1055 #endif
1056 
1057 	if (error == ENETRESET) {
1058 		error = 0;
1059 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1060 			;
1061 		else if (ifp->if_flags & IFF_RUNNING) {
1062 			/*
1063 			 * Multicast list has changed; set the hardware filter
1064 			 * accordingly.
1065 			 */
1066 			mutex_enter(sc->sc_lock);
1067 			dwc_gmac_setmulti(sc);
1068 			mutex_exit(sc->sc_lock);
1069 		}
1070 	}
1071 
1072 	/* Try to get things going again */
1073 	if (ifp->if_flags & IFF_UP)
1074 		dwc_gmac_start(ifp);
1075 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1076 
1077 #ifndef DWCGMAC_MPSAFE
1078 	splx(s);
1079 #endif
1080 
1081 	return error;
1082 }
1083 
1084 static void
1085 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
1086 {
1087 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1088 	struct dwc_gmac_tx_data *data;
1089 	struct dwc_gmac_dev_dmadesc *desc;
1090 	uint32_t status;
1091 	int i, nsegs;
1092 
1093 	mutex_enter(&sc->sc_txq.t_mtx);
1094 
1095 	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
1096 #ifdef DWC_GMAC_DEBUG
1097 		aprint_normal_dev(sc->sc_dev,
1098 		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
1099 		    i, sc->sc_txq.t_queued);
1100 #endif
1101 
1102 		/*
1103 		 * i+1 does not need to be a valid descriptor index;
1104 		 * it is only used here to make dwc_gmac_txdesc_sync()
1105 		 * sync the single TX descriptor (i).
1106 		 */
1107 		dwc_gmac_txdesc_sync(sc, i, i+1,
1108 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1109 
1110 		desc = &sc->sc_txq.t_desc[i];
1111 		status = le32toh(desc->ddesc_status);
1112 		if (status & DDESC_STATUS_OWNEDBYDEV)
1113 			break;
1114 
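		/*
		 * Only the descriptor that carries the last segment of a
		 * packet has an mbuf (and the active DMA map) attached to
		 * it; the earlier descriptors of a chain are skipped here.
		 */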
1115 		data = &sc->sc_txq.t_data[i];
1116 		if (data->td_m == NULL)
1117 			continue;
1118 
1119 		ifp->if_opackets++;
1120 		nsegs = data->td_active->dm_nsegs;
1121 		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
1122 		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1123 		bus_dmamap_unload(sc->sc_dmat, data->td_active);
1124 
1125 #ifdef DWC_GMAC_DEBUG
1126 		aprint_normal_dev(sc->sc_dev,
1127 		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
1128 		    "freeing mbuf %p\n", i, data->td_m);
1129 #endif
1130 
1131 		m_freem(data->td_m);
1132 		data->td_m = NULL;
1133 
1134 		sc->sc_txq.t_queued -= nsegs;
1135 	}
1136 
1137 	sc->sc_txq.t_next = i;
1138 
1139 	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
1140 		ifp->if_flags &= ~IFF_OACTIVE;
1141 	}
1142 	mutex_exit(&sc->sc_txq.t_mtx);
1143 }
1144 
1145 static void
1146 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
1147 {
1148 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1149 	struct dwc_gmac_dev_dmadesc *desc;
1150 	struct dwc_gmac_rx_data *data;
1151 	bus_addr_t physaddr;
1152 	uint32_t status;
1153 	struct mbuf *m, *mnew;
1154 	int i, len, error;
1155 
1156 	mutex_enter(&sc->sc_rxq.r_mtx);
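	/*
	 * Walk the RX ring starting at r_cur and hand completed frames to
	 * the stack, until we reach a descriptor still owned by the device.
	 */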
1157 	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
1158 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1159 		    RX_DESC_OFFSET(i), sizeof(*desc),
1160 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1161 		desc = &sc->sc_rxq.r_desc[i];
1162 		data = &sc->sc_rxq.r_data[i];
1163 
1164 		status = le32toh(desc->ddesc_status);
1165 		if (status & DDESC_STATUS_OWNEDBYDEV)
1166 			break;
1167 
1168 		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
1169 #ifdef DWC_GMAC_DEBUG
1170 			aprint_normal_dev(sc->sc_dev,
1171 			    "RX error: descriptor status %08x, skipping\n",
1172 			    status);
1173 #endif
1174 			ifp->if_ierrors++;
1175 			goto skip;
1176 		}
1177 
1178 		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);
1179 
1180 #ifdef DWC_GMAC_DEBUG
1181 		aprint_normal_dev(sc->sc_dev,
1182 		    "rx int: device is done with descriptor #%d, len: %d\n",
1183 		    i, len);
1184 #endif
1185 
1186 		/*
1187 		 * Try to get a new mbuf before passing this one
1188 		 * up; if that fails, drop the packet and reuse
1189 		 * the existing one.
1190 		 */
1191 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1192 		if (mnew == NULL) {
1193 			ifp->if_ierrors++;
1194 			goto skip;
1195 		}
1196 		MCLGET(mnew, M_DONTWAIT);
1197 		if ((mnew->m_flags & M_EXT) == 0) {
1198 			m_freem(mnew);
1199 			ifp->if_ierrors++;
1200 			goto skip;
1201 		}
1202 
1203 		/* unload old DMA map */
1204 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1205 		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1206 		bus_dmamap_unload(sc->sc_dmat, data->rd_map);
1207 
1208 		/* and reload with new mbuf */
1209 		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
1210 		    mtod(mnew, void*), MCLBYTES, NULL,
1211 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
1212 		if (error != 0) {
1213 			m_freem(mnew);
1214 			/* try to reload old mbuf */
1215 			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
1216 			    mtod(data->rd_m, void*), MCLBYTES, NULL,
1217 			    BUS_DMA_READ | BUS_DMA_NOWAIT);
1218 			if (error != 0) {
1219 				panic("%s: could not load old rx mbuf",
1220 				    device_xname(sc->sc_dev));
1221 			}
1222 			ifp->if_ierrors++;
1223 			goto skip;
1224 		}
1225 		physaddr = data->rd_map->dm_segs[0].ds_addr;
1226 
1227 		/*
1228 		 * New mbuf loaded, update RX ring and continue
1229 		 */
1230 		m = data->rd_m;
1231 		data->rd_m = mnew;
1232 		desc->ddesc_data = htole32(physaddr);
1233 
1234 		/* finalize mbuf */
1235 		m->m_pkthdr.len = m->m_len = len;
1236 		m_set_rcvif(m, ifp);
1237 		m->m_flags |= M_HASFCS;
1238 
1239 		if_percpuq_enqueue(sc->sc_ipq, m);
1240 
1241 skip:
1242 		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1243 		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1244 		desc->ddesc_cntl = htole32(
1245 		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
1246 		    DDESC_CNTL_RXCHAIN);
1247 		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
1248 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1249 		    RX_DESC_OFFSET(i), sizeof(*desc),
1250 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1251 	}
1252 
1253 	/* update RX pointer */
1254 	sc->sc_rxq.r_cur = i;
1255 
1256 	mutex_exit(&sc->sc_rxq.r_mtx);
1257 }
1258 
1259 /*
1260  * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
1261  */
1262 static uint32_t
1263 bitrev32(uint32_t x)
1264 {
1265 	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
1266 	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
1267 	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
1268 	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
1269 
1270 	return (x >> 16) | (x << 16);
1271 }
1272 
1273 static void
1274 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
1275 {
1276 	struct ifnet * const ifp = &sc->sc_ec.ec_if;
1277 	struct ether_multi *enm;
1278 	struct ether_multistep step;
1279 	uint32_t hashes[2] = { 0, 0 };
1280 	uint32_t ffilt, h;
1281 	int mcnt;
1282 
1283 	KASSERT(mutex_owned(sc->sc_lock));
1284 
1285 	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
1286 
1287 	if (ifp->if_flags & IFF_PROMISC) {
1288 		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
1289 		goto special_filter;
1290 	}
1291 
1292 	ifp->if_flags &= ~IFF_ALLMULTI;
1293 	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);
1294 
1295 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
1296 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
1297 
1298 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
1299 	mcnt = 0;
1300 	while (enm != NULL) {
1301 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1302 		    ETHER_ADDR_LEN) != 0) {
1303 			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
1304 			ifp->if_flags |= IFF_ALLMULTI;
1305 			goto special_filter;
1306 		}
1307 
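		/*
		 * The hardware hashes the destination address with CRC-32;
		 * the upper 6 bits of the bit-reversed CRC select one of
		 * the 64 bits in the HTHIGH/HTLOW hash table registers.
		 */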
1308 		h = bitrev32(
1309 			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
1310 		    ) >> 26;
1311 		hashes[h >> 5] |= (1 << (h & 0x1f));
1312 
1313 		mcnt++;
1314 		ETHER_NEXT_MULTI(step, enm);
1315 	}
1316 
1317 	if (mcnt)
1318 		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
1319 	else
1320 		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
1321 
1322 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
1323 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1324 	    hashes[0]);
1325 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1326 	    hashes[1]);
1327 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1328 
1329 #ifdef DWC_GMAC_DEBUG
1330 	dwc_gmac_dump_ffilt(sc, ffilt);
1331 #endif
1332 	return;
1333 
1334 special_filter:
1335 #ifdef DWC_GMAC_DEBUG
1336 	dwc_gmac_dump_ffilt(sc, ffilt);
1337 #endif
1338 	/* no MAC hashes, ALLMULTI or PROMISC */
1339 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
1340 	    ffilt);
1341 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1342 	    0xffffffff);
1343 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1344 	    0xffffffff);
1345 	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1346 }
1347 
1348 int
1349 dwc_gmac_intr(struct dwc_gmac_softc *sc)
1350 {
1351 	uint32_t status, dma_status;
1352 	int rv = 0;
1353 
1354 	if (sc->sc_stopping)
1355 		return 0;
1356 
1357 	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
1358 	if (status & AWIN_GMAC_MII_IRQ) {
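		/* Reading the MII status register acknowledges the link interrupt. */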
1359 		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1360 		    AWIN_GMAC_MII_STATUS);
1361 		rv = 1;
1362 		mii_pollstat(&sc->sc_mii);
1363 	}
1364 
1365 	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1366 	    AWIN_GMAC_DMA_STATUS);
1367 
1368 	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
1369 		rv = 1;
1370 
1371 	if (dma_status & GMAC_DMA_INT_TIE)
1372 		dwc_gmac_tx_intr(sc);
1373 
1374 	if (dma_status & GMAC_DMA_INT_RIE)
1375 		dwc_gmac_rx_intr(sc);
1376 
1377 	/*
1378 	 * Check error conditions
1379 	 */
1380 	if (dma_status & GMAC_DMA_INT_ERRORS) {
1381 		sc->sc_ec.ec_if.if_oerrors++;
1382 #ifdef DWC_GMAC_DEBUG
1383 		dwc_dump_and_abort(sc, "interrupt error condition");
1384 #endif
1385 	}
1386 
1387 	/* ack interrupt */
1388 	if (dma_status)
1389 		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1390 		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1391 
1392 	/*
1393 	 * Get more packets
1394 	 */
1395 	if (rv)
1396 		if_schedule_deferred_start(&sc->sc_ec.ec_if);
1397 
1398 	return rv;
1399 }
1400 
1401 #ifdef DWC_GMAC_DEBUG
1402 static void
1403 dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1404 {
1405 	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1406 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1407 	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1408 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1409 	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1410 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1411 	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1412 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1413 	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1414 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1415 	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1416 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1417 	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1418 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1419 	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1420 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1421 	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1422 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1423 	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1424 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1425 	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1426 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1427 	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1428 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1429 }
1430 
1431 static void
1432 dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1433 {
1434 	int i;
1435 
1436 	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1437 	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1438 	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1439 	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1440 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1441 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1442 		    "data: %08x next: %08x\n",
1443 		    i, sc->sc_txq.t_physaddr +
1444 			i*sizeof(struct dwc_gmac_dev_dmadesc),
1445 		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
1446 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1447 	}
1448 }
1449 
1450 static void
1451 dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1452 {
1453 	int i;
1454 
1455 	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1456 	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1457 	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1458 	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1459 		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1460 		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1461 		    "data: %08x next: %08x\n",
1462 		    i, sc->sc_rxq.r_physaddr +
1463 			i*sizeof(struct dwc_gmac_dev_dmadesc),
1464 		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
1465 		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1466 	}
1467 }
1468 
1469 static void
1470 dwc_dump_status(struct dwc_gmac_softc *sc)
1471 {
1472 	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1473 	     AWIN_GMAC_MAC_INTR);
1474 	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1475 	     AWIN_GMAC_DMA_STATUS);
1476 	char buf[200];
1477 
1478 	/* print interrupt state */
1479 	snprintb(buf, sizeof(buf), "\177\20"
1480 	    "b\x10""NI\0"
1481 	    "b\x0f""AI\0"
1482 	    "b\x0e""ER\0"
1483 	    "b\x0d""FB\0"
1484 	    "b\x0a""ET\0"
1485 	    "b\x09""RW\0"
1486 	    "b\x08""RS\0"
1487 	    "b\x07""RU\0"
1488 	    "b\x06""RI\0"
1489 	    "b\x05""UN\0"
1490 	    "b\x04""OV\0"
1491 	    "b\x03""TJ\0"
1492 	    "b\x02""TU\0"
1493 	    "b\x01""TS\0"
1494 	    "b\x00""TI\0"
1495 	    "\0", dma_status);
1496 	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1497 	    status, buf);
1498 }
1499 
1500 static void
1501 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1502 {
1503 	dwc_dump_status(sc);
1504 	dwc_gmac_dump_ffilt(sc,
1505 	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1506 	dwc_gmac_dump_dma(sc);
1507 	dwc_gmac_dump_tx_desc(sc);
1508 	dwc_gmac_dump_rx_desc(sc);
1509 
1510 	panic("%s", msg);
1511 }
1512 
1513 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1514 {
1515 	char buf[200];
1516 
1517 	/* print filter setup */
1518 	snprintb(buf, sizeof(buf), "\177\20"
1519 	    "b\x1f""RA\0"
1520 	    "b\x0a""HPF\0"
1521 	    "b\x09""SAF\0"
1522 	    "b\x08""SAIF\0"
1523 	    "b\x05""DBF\0"
1524 	    "b\x04""PM\0"
1525 	    "b\x03""DAIF\0"
1526 	    "b\x02""HMC\0"
1527 	    "b\x01""HUC\0"
1528 	    "b\x00""PR\0"
1529 	    "\0", ffilt);
1530 	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1531 }
1532 #endif
1533