/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.7 2014/09/14 18:28:37 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);


#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))

#define	RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
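
/*
 * Both rings live in one contiguous DMA allocation (set up in
 * dwc_gmac_alloc_dma_rings() below): the first AWGE_RX_RING_COUNT
 * descriptors belong to the RX ring and the following descriptors
 * to the TX ring, which is what the two offset macros above encode.
 */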

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_dma(sc);
#endif

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
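
/*
 * Note: bus front-ends are expected to have mapped the core's registers
 * and filled in sc->sc_dev, sc->sc_bst, sc->sc_bsh and sc->sc_dmat before
 * calling dwc_gmac_attach() with their MDIO clock divider.  A purely
 * illustrative (hypothetical) sketch of such a front-end attach:
 *
 *	sc->sc_dev = self;
 *	sc->sc_bst = bst;
 *	sc->sc_bsh = bsh;
 *	sc->sc_dmat = dmat;
 *	dwc_gmac_attach(sc, mii_clk_div);
 */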
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
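	/* wait up to ~30ms (3000 * 10us) for the core to clear the reset bit */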
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
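	/* e.g. 00:11:22:33:44:55 yields lo = 0x33221100, hi = 0x00005544 */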
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

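	/* poll for completion: at most ~10ms (1000 * 10us) */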
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = (i+1) % AWGE_RX_RING_COUNT;
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	/* flush the CPU-initialized descriptors before the device uses them */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	/* we may get here from a failed attach before the map was created */
	if (sc->sc_dma_ring_map == NULL)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/* sync the buffer map itself, not the descriptor ring */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	/* flush the zeroed descriptors before the device may read them */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *((i+1)%AWGE_TX_RING_COUNT));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
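	/*
	 * Example: with a 32 descriptor ring, start == 30 and end == 2
	 * wrap around, so descriptors 30..31 are synced first, then 0..1.
	 */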
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);
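	/* the TX side is only started on demand, see dwc_gmac_start() */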

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
	}

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
#endif
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXINT|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST;
		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		sc->sc_txq.t_queued++;

		sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
		    & (AWGE_TX_RING_COUNT-1);
	}
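	/*
	 * The wrap-around above relies on AWGE_TX_RING_COUNT being a
	 * power of two, since the ring index is masked with (count - 1).
	 */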

	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer before handing the first descriptor over */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	/* struct dwc_gmac_softc *sc = ifp->if_softc; */
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

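/*
 * XXX Temporary bring-up debugging: dump the interrupt state on every
 * interrupt and deliberately panic after 20 of them.
 */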
printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
    status, dma_status);

static size_t cnt = 0;
if (++cnt > 20)
	panic("enough now");

	return 1;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		    i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}
#endif