1 /* $NetBSD: if_gmc.c,v 1.6 2013/06/11 16:57:05 msaitoh Exp $ */
2 /*-
3  * Copyright (c) 2008 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Matt Thomas <matt@3am-software.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/param.h>
32 #include <sys/callout.h>
33 #include <sys/device.h>
34 #include <sys/ioctl.h>
35 #include <sys/kernel.h>
36 #include <sys/kmem.h>
37 #include <sys/mbuf.h>
38 
39 #include <sys/bus.h>
40 #include <machine/intr.h>
41 
42 #include <arm/gemini/gemini_reg.h>
43 #include <arm/gemini/gemini_gmacreg.h>
44 #include <arm/gemini/gemini_gmacvar.h>
45 
46 #include <net/if.h>
47 #include <net/if_ether.h>
48 #include <net/if_dl.h>
49 
50 __KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.6 2013/06/11 16:57:05 msaitoh Exp $");
51 
52 #define	MAX_TXSEG	32
53 
54 struct gmc_softc {
55 	device_t sc_dev;
56 	struct gmac_softc *sc_psc;
57 	struct gmc_softc *sc_sibling;
58 	bus_dma_tag_t sc_dmat;
59 	bus_space_tag_t sc_iot;
60 	bus_space_handle_t sc_ioh;
61 	bus_space_handle_t sc_dma_ioh;
62 	bus_space_handle_t sc_gmac_ioh;
63 	struct ethercom sc_ec;
64 	struct mii_data sc_mii;
65 	void *sc_ih;
66 	bool sc_port1;
67 	uint8_t sc_phy;
68 	gmac_hwqueue_t *sc_rxq;
69 	gmac_hwqueue_t *sc_txq[6];
70 	callout_t sc_mii_ch;
71 
72 	uint32_t sc_gmac_status;
73 	uint32_t sc_gmac_sta_add[3];
74 	uint32_t sc_gmac_mcast_filter[2];
75 	uint32_t sc_gmac_rx_filter;
76 	uint32_t sc_gmac_config[2];
77 	uint32_t sc_dmavr;
78 
79 	uint32_t sc_int_mask[5];
80 	uint32_t sc_int_enabled[5];
81 };
82 
83 #define	sc_if	sc_ec.ec_if
84 
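/*
 * Queue an mbuf chain on the given hardware transmit queue.  Any mbuf
 * whose data starts at an odd address is shuffled first, since the
 * descriptors require even buffer addresses (see the KASSERT below).
 * Returns false, without consuming the mbuf, when no DMA map or not
 * enough descriptors are available, so the caller can retry later.
 */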
85 static bool
86 gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
87 {
88 	bus_dmamap_t map;
89 	uint32_t desc0, desc1, desc3;
90 	struct mbuf *last_m, *m0;
91 	size_t count, i;
92 	int error;
93 	gmac_desc_t *d;
94 
95 	KASSERT(hwq != NULL);
96 
97 	map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
98 	if (map == NULL)
99 		return false;
100 
101 	for (last_m = NULL, m0 = m, count = 0;
102 	     m0 != NULL;
103 	     last_m = m0, m0 = m0->m_next) {
104 		vaddr_t addr = (uintptr_t)m0->m_data;
105 		if (m0->m_len == 0)
106 			continue;
107 		if (addr & 1) {
108 			if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
109 				last_m->m_data[last_m->m_len++] = *m0->m_data++;
110 				m0->m_len--;
111 			} else if (M_TRAILINGSPACE(m0) > 0) {
112 				memmove(m0->m_data + 1, m0->m_data, m0->m_len);
113 				m0->m_data++;
114 			} else if (M_LEADINGSPACE(m0) > 0) {
115 				memmove(m0->m_data - 1, m0->m_data, m0->m_len);
116 				m0->m_data--;
117 			} else {
118 				panic("gmc_txqueue: odd addr %p", m0->m_data);
119 			}
120 		}
121 		count += ((addr & PGOFSET) + m0->m_len + PGOFSET) >> PGSHIFT;
122 	}
123 
124 	gmac_hwqueue_sync(hwq);
125 	if (hwq->hwq_free <= count) {
126 		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
127 		return false;
128 	}
129 
130 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
131 	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
132 	if (error) {
133 		aprint_error_dev(sc->sc_dev, "txqueue: load failed: %d\n",
134 		    error);
135 		gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
136 		m_freem(m);
137 		sc->sc_if.if_oerrors++;
138 		return true;
139 	}
140 	KASSERT(map->dm_nsegs > 0);
141 
142 	/*
143 	 * Sync the mbuf contents to memory/cache.
144 	 */
145 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
146 		BUS_DMASYNC_PREWRITE);
147 
148 	/*
149 	 * Now we need to load the descriptors...
150 	 */
151 	desc0 = map->dm_nsegs << 16;
152 	desc1 = m->m_pkthdr.len;
153 	desc3 = DESC3_SOF;
154 	i = 0;
155 	d = NULL;
156 	do {
157 #if 0
158 		if (i > 0)
159 			aprint_debug_dev(sc->sc_dev,
160 			    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
161 			    i-1, d, d->d_desc0, d->d_desc1,
162 			    d->d_bufaddr, d->d_desc3);
163 #endif
164 		d = gmac_hwqueue_desc(hwq, i);
165 		KASSERT(map->dm_segs[i].ds_len > 0);
166 		KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
167 		d->d_desc0 = htole32(map->dm_segs[i].ds_len | desc0);
168 		d->d_desc1 = htole32(desc1);
169 		d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
170 		d->d_desc3 = htole32(desc3);
171 		desc3 = 0;
172 	} while (++i < map->dm_nsegs);
173 
174 	d->d_desc3 |= htole32(DESC3_EOF|DESC3_EOFIE);
175 #if 0
176 	aprint_debug_dev(sc->sc_dev,
177 	    "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
178 	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
179 #endif
180 	M_SETCTX(m, map);
181 	IF_ENQUEUE(&hwq->hwq_ifq, m);
182 	/*
183 	 * Last descriptor has been marked.  Give them to the h/w.
184 	 * This will sync for us.
185 	 */
186 	gmac_hwqueue_produce(hwq, map->dm_nsegs);
187 #if 0
188 	aprint_debug_dev(sc->sc_dev,
189 	    "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
190 	    i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
191 #endif
192 	return true;
193 }
194 
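/*
 * Program the unicast station address, the multicast hash filter, and
 * the RX filter flags from the current interface state.  Each register
 * is written only when its shadowed value actually changes.
 */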
195 static void
196 gmc_filter_change(struct gmc_softc *sc)
197 {
198 	struct ether_multi *enm;
199 	struct ether_multistep step;
200 	uint32_t mhash[2];
201 	uint32_t new0, new1, new2;
202 	const uint8_t * const eaddr = (const uint8_t *)CLLADDR(sc->sc_if.if_sadl);
203 
204 	new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
205 	new1 = eaddr[4] | (eaddr[5] << 8);
206 	new2 = 0;
207 	if (sc->sc_gmac_sta_add[0] != new0
208 	    || sc->sc_gmac_sta_add[1] != new1
209 	    || sc->sc_gmac_sta_add[2] != new2) {
210 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
211 		    new0);
212 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
213 		    new1);
214 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
215 		    new2);
216 		sc->sc_gmac_sta_add[0] = new0;
217 		sc->sc_gmac_sta_add[1] = new1;
218 		sc->sc_gmac_sta_add[2] = new2;
219 	}
220 
221 	mhash[0] = 0;
222 	mhash[1] = 0;
223 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
224 	while (enm != NULL) {
225 		size_t i;
226 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
227 			mhash[0] = mhash[1] = 0xffffffff;
228 			break;
229 		}
230 		i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
231 		mhash[(i >> 5) & 1] |= 1 << (i & 31);
232 		ETHER_NEXT_MULTI(step, enm);
233 	}
234 
235 	if (sc->sc_gmac_mcast_filter[0] != mhash[0]
236 	    || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
237 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
238 		    GMAC_MCAST_FILTER0, mhash[0]);
239 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
240 		    GMAC_MCAST_FILTER1, mhash[1]);
241 		sc->sc_gmac_mcast_filter[0] = mhash[0];
242 		sc->sc_gmac_mcast_filter[1] = mhash[1];
243 	}
244 
245 	new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
246 	new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
247 	if (sc->sc_if.if_flags & IFF_PROMISC)
248 		new0 |= RXFILTER_PROMISC;
249 
250 	if (new0 != sc->sc_gmac_rx_filter) {
251 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
252 		    new0);
253 		sc->sc_gmac_rx_filter = new0;
254 	}
255 }
256 
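/*
 * Once-a-second callout: lets the receive mbuf reserve shrink back to
 * MIN_RXMAPS after a fifo-overflow burst and drives the MII state
 * machine while the interface is running.
 */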
257 static void
258 gmc_mii_tick(void *arg)
259 {
260 	struct gmc_softc * const sc = arg;
261 	struct gmac_softc * const psc = sc->sc_psc;
262 	int s = splnet();
263 
264 	/*
265 	 * If we had to increase the number of receive mbufs due to fifo
266 	 * overflows, we need a way to decrease them again.  So in every
267 	 * second during which we receive MIN_RXMAPS or fewer packets, we
268 	 * decrement swfree_min until it returns to MIN_RXMAPS.
269 	 */
270 	if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
271 	    && psc->sc_swfree_min > MIN_RXMAPS) {
272 		psc->sc_swfree_min--;
273 		gmac_swfree_min_update(psc);
274 	}
275 	/*
276 	 * If only one GMAC is running or this is port0, reset the count.
277 	 */
278 	if (psc->sc_running != 3 || !sc->sc_port1)
279 		psc->sc_rxpkts_per_sec = 0;
280 
281 	mii_tick(&sc->sc_mii);
282 	if (sc->sc_if.if_flags & IFF_RUNNING)
283 		callout_schedule(&sc->sc_mii_ch, hz);
284 
285 	splx(s);
286 }
287 
288 static int
289 gmc_mediachange(struct ifnet *ifp)
290 {
291 	struct gmc_softc * const sc = ifp->if_softc;
292 
293 	if ((ifp->if_flags & IFF_UP) == 0)
294 		return 0;
295 
296 	return mii_mediachg(&sc->sc_mii);
297 }
298 
299 static void
300 gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
301 {
302 	struct gmc_softc * const sc = ifp->if_softc;
303 
304 	mii_pollstat(&sc->sc_mii);
305 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
306 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
307 }
308 
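/*
 * MII status change callback: fold the negotiated PHY mode, speed,
 * duplex and link state into the GMAC status register, writing it
 * only when something changed.
 */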
309 static void
310 gmc_mii_statchg(struct ifnet *ifp)
311 {
312 	struct gmc_softc * const sc = ifp->if_softc;
313 	uint32_t gmac_status;
314 
315 	gmac_status = sc->sc_gmac_status;
316 
317 	gmac_status &= ~STATUS_PHYMODE_MASK;
318 	gmac_status |= STATUS_PHYMODE_RGMII_A;
319 
320 	gmac_status &= ~STATUS_SPEED_MASK;
321 	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
322 		gmac_status |= STATUS_SPEED_1000M;
323 	} else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
324 		gmac_status |= STATUS_SPEED_100M;
325 	} else {
326 		gmac_status |= STATUS_SPEED_10M;
327 	}
328 
329 	if (sc->sc_mii.mii_media_active & IFM_FDX)
330 		gmac_status |= STATUS_DUPLEX_FULL;
331 	else
332 		gmac_status &= ~STATUS_DUPLEX_FULL;
333 
334 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE)
335 		gmac_status |= STATUS_LINK_ON;
336 	else
337 		gmac_status &= ~STATUS_LINK_ON;
338 
339 	if (sc->sc_gmac_status != gmac_status) {
340 		aprint_debug_dev(sc->sc_dev,
341 		    "status change old=%#x new=%#x active=%#x\n",
342 		    sc->sc_gmac_status, gmac_status,
343 		    sc->sc_mii.mii_media_active);
344 		sc->sc_gmac_status = gmac_status;
345 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
346 		    sc->sc_gmac_status);
347 	}
348 
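	/*
	 * PHY register 0x18 is in the vendor-specific range; this write
	 * is PHY/board specific.
	 */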
349 	(*sc->sc_mii.mii_writereg)(sc->sc_dev, sc->sc_phy, 0x0018, 0x0041);
350 }
351 
352 static int
353 gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
354 {
355 	struct gmc_softc * const sc = ifp->if_softc;
356 	struct ifreq * const ifr = data;
357 	int s;
358 	int error;
359 	s = splnet();
360 
361 	switch (cmd) {
362 	case SIOCSIFMEDIA:
363 	case SIOCGIFMEDIA:
364 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
365 		break;
366 	default:
367 		error = ether_ioctl(ifp, cmd, data);
368 		if (error == ENETRESET) {
369 			if (ifp->if_flags & IFF_RUNNING) {
370 				/*
371 				 * If the interface is running, we have to
372 				 * update its multicast filter.
373 				 */
374 				gmc_filter_change(sc);
375 			}
376 			error = 0;
377 		}
378 	}
379 
380 	splx(s);
381 	return error;
382 }
383 
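/*
 * Drain the interface send queue into software transmit queue 0.  If a
 * packet cannot be queued, it is put back at the head of the send queue
 * and IFF_OACTIVE is set.
 */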
384 static void
385 gmc_ifstart(struct ifnet *ifp)
386 {
387 	struct gmc_softc * const sc = ifp->if_softc;
388 
389 #if 0
390 	if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0)
391 		return;
392 #endif
393 	if ((ifp->if_flags & IFF_RUNNING) == 0)
394 		return;
395 
396 	for (;;) {
397 		struct mbuf *m;
398 		IF_DEQUEUE(&ifp->if_snd, m);
399 		if (m == NULL)
400 			break;
401 		if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
402 			IF_PREPEND(&ifp->if_snd, m);
403 			ifp->if_flags |= IFF_OACTIVE;
404 			break;
405 		}
406 	}
407 }
408 
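/*
 * Stop this port: withdraw its interrupt enables from the shared GMAC
 * state and, if the other port keeps running, make sure the sw freeq
 * empty interrupt is no longer steered at this port.
 */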
409 static void
410 gmc_ifstop(struct ifnet *ifp, int disable)
411 {
412 	struct gmc_softc * const sc = ifp->if_softc;
413 	struct gmac_softc * const psc = sc->sc_psc;
414 
415 	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
416 	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
417 	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
418 	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
419 	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
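	/*
	 * Clear this port's bits but leave the shared sw freeq empty
	 * interrupt alone; it is only disabled below once neither port
	 * is running.
	 */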
420 	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
421 	if (psc->sc_running == 0) {
422 		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
423 		KASSERT(psc->sc_int_enabled[0] == 0);
424 		KASSERT(psc->sc_int_enabled[1] == 0);
425 		KASSERT(psc->sc_int_enabled[2] == 0);
426 		KASSERT(psc->sc_int_enabled[3] == 0);
427 		KASSERT(psc->sc_int_enabled[4] == 0);
428 	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
429 			== sc->sc_port1) {
430 		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
431 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
432 		    psc->sc_int_select[4]);
433 	}
434 	gmac_intr_update(psc);
435 	if (disable) {
436 #if 0
437 		if (psc->sc_running == 0) {
438 			gmac_mapcache_destroy(&psc->sc_txmaps);
439 			gmac_mapcache_destroy(&psc->sc_rxmaps);
440 		}
441 #endif
442 	}
443 }
444 
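/*
 * Bring the interface up: create the receive queue and the six software
 * transmit queues on first use, program the RX filter, DMA and CONFIG0
 * registers, enable this port's interrupts, and start the MII callout.
 */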
445 static int
446 gmc_ifinit(struct ifnet *ifp)
447 {
448 	struct gmc_softc * const sc = ifp->if_softc;
449 	struct gmac_softc * const psc = sc->sc_psc;
450 	uint32_t new, mask;
451 
452 	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
453 	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);
454 
455 	if (sc->sc_rxq == NULL) {
456 		gmac_hwqmem_t *hqm;
457 		hqm = gmac_hwqmem_create(psc->sc_rxmaps, 16, /*RXQ_NDESCS,*/ 1,
458 		   HQM_CONSUMER|HQM_RX);
459 		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
460 		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
461 		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
462 		if (sc->sc_rxq == NULL) {
463 			gmac_hwqmem_destroy(hqm);
464 			goto failed;
465 		}
466 		sc->sc_rxq->hwq_ifp = ifp;
467 		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
468 	}
469 
470 	if (sc->sc_txq[0] == NULL) {
471 		gmac_hwqueue_t *hwq, *last_hwq;
472 		gmac_hwqmem_t *hqm;
473 		size_t i;
474 
475 		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
476 		   HQM_PRODUCER|HQM_TX);
477 		KASSERT(hqm != NULL);
478 		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
479 			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
480 			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
481 			    GMAC_SW_TX_Q_BASE, i);
482 			if (sc->sc_txq[i] == NULL) {
483 				if (i == 0)
484 					gmac_hwqmem_destroy(hqm);
485 				goto failed;
486 			}
487 			sc->sc_txq[i]->hwq_ifp = ifp;
488 
489 			last_hwq = NULL;
490 			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
491 			    hwq_link) {
492 				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
493 					break;
494 				last_hwq = hwq;
495 			}
496 			if (last_hwq == NULL)
497 				SLIST_INSERT_HEAD(
498 				    &psc->sc_hwfreeq->hwq_producers,
499 				    sc->sc_txq[i], hwq_link);
500 			else
501 				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
502 				    hwq_link);
503 		}
504 	}
505 
506 	gmc_filter_change(sc);
507 
508 	mask = DMAVR_LOOPBACK|DMAVR_DROP_SMALL_ACK|DMAVR_EXTRABYTES_MASK
509 	    |DMAVR_RXBURSTSIZE_MASK|DMAVR_RXBUSWIDTH_MASK
510 	    |DMAVR_TXBURSTSIZE_MASK|DMAVR_TXBUSWIDTH_MASK;
511 	new = DMAVR_RXDMA_ENABLE|DMAVR_TXDMA_ENABLE
512 	    |DMAVR_EXTRABYTES(2)
513 	    |DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
514 	    |DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
515 	    |DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
516 	    |DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
517 	new |= sc->sc_dmavr & ~mask;
518 	if (sc->sc_dmavr != new) {
519 		sc->sc_dmavr = new;
520 		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
521 		    sc->sc_dmavr);
522 		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
523 		    sc->sc_dmavr,
524 		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
525 	}
526 
527 	mask = CONFIG0_MAXLEN_MASK|CONFIG0_TX_DISABLE|CONFIG0_RX_DISABLE
528 	    |CONFIG0_LOOPBACK|/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
529 	    |CONFIG0_RGMII_INBAND_STATUS_ENABLE;
530 	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536)|CONFIG0_R_LATCHED_MMII;
531 	new |= (sc->sc_gmac_config[0] & ~mask);
532 	if (sc->sc_gmac_config[0] != new) {
533 		sc->sc_gmac_config[0] = new;
534 		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
535 		    sc->sc_gmac_config[0]);
536 		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
537 		    sc->sc_gmac_config[0],
538 		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
539 	}
540 
541 	psc->sc_rxpkts_per_sec +=
542 	    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
543 
544 	/*
545 	 * If we will be the only active interface, make sure the sw freeq
546 	 * interrupt gets routed to us.
547 	 */
548 	if (psc->sc_running == 0
549 	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
550 		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
551 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
552 		    psc->sc_int_select[4]);
553 	}
554 	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
555 	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
556 	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
557 	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
558 	    & (INT4_TX_FAIL|INT4_MIB_HEMIWRAP|INT4_RX_FIFO_OVRN
559 	       |INT4_RGMII_STSCHG));
560 
561 	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
562 	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
563 	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];
564 
565 	gmac_intr_update(psc);
566 
567 	if ((ifp->if_flags & IFF_RUNNING) == 0)
568 		mii_tick(&sc->sc_mii);
569 
570 	ifp->if_flags |= IFF_RUNNING;
571 	psc->sc_running |= (sc->sc_port1 ? 2 : 1);
572 
573 	callout_schedule(&sc->sc_mii_ch, hz);
574 
575 	return 0;
576 
577 failed:
578 	gmc_ifstop(ifp, true);
579 	return ENOMEM;
580 }
581 
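/*
 * Interrupt handler.  Reports DMA errors, reclaims completed transmit
 * descriptors, refills the software free queue, consumes the default
 * receive queue, and restarts transmission when space was reclaimed.
 */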
582 static int
583 gmc_intr(void *arg)
584 {
585 	struct gmc_softc * const sc = arg;
586 	uint32_t int0_status, int1_status, int4_status;
587 	uint32_t status;
588 	bool do_ifstart = false;
589 	int rv = 0;
590 
591 	aprint_debug_dev(sc->sc_dev, "gmac_intr: entry\n");
592 
593 	int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
594 	    GMAC_INT0_STATUS);
595 	int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
596 	    GMAC_INT1_STATUS);
597 	int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
598 	    GMAC_INT4_STATUS);
599 
600 	aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
601 	    int0_status, int1_status,
602 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
603 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
604 	    int4_status);
605 
606 #if 0
607 	aprint_debug_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
608 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
609 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
610 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
611 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
612 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
613 #endif
614 
615 	status = int0_status & sc->sc_int_mask[0];
616 	if (status & (INT0_TXDERR|INT0_TXPERR)) {
617 		aprint_error_dev(sc->sc_dev,
618 		    "transmit%s%s error: %#x %08x bufaddr %#x\n",
619 		    status & INT0_TXDERR ? " data" : "",
620 		    status & INT0_TXPERR ? " protocol" : "",
621 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
622 		    GMAC_DMA_TX_CUR_DESC),
623 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
624 		    GMAC_SW_TX_Q0_RWPTR),
625 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
626 		    GMAC_DMA_TX_DESC2));
627 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
628 		    status & (INT0_TXDERR|INT0_TXPERR));
629 		Debugger();
630 	}
631 	if (status & (INT0_RXDERR|INT0_RXPERR)) {
632 		aprint_error_dev(sc->sc_dev,
633 		    "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
634 		    status & INT0_RXDERR ? " data" : "",
635 		    status & INT0_RXPERR ? " protocol" : "",
636 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
637 		    GMAC_DMA_RX_CUR_DESC),
638 		bus_space_read_4(sc->sc_iot, sc->sc_ioh,
639 		    GMAC_SWFREEQ_RWPTR),
640 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
641 		    GMAC_DMA_RX_DESC0),
642 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
643 		    GMAC_DMA_RX_DESC1),
644 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
645 		    GMAC_DMA_RX_DESC2),
646 		bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
647 		    GMAC_DMA_RX_DESC3));
648 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
649 		    status & (INT0_RXDERR|INT0_RXPERR));
650 		Debugger();
651 	}
652 	if (status & INT0_SWTXQ_EOF) {
653 		status &= INT0_SWTXQ_EOF;
654 		for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
655 			if (status & INT0_SWTXQn_EOF(i)) {
656 				gmac_hwqueue_sync(sc->sc_txq[i]);
657 				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
658 				    GMAC_INT0_STATUS,
659 				    sc->sc_int_mask[0] & (INT0_SWTXQn_EOF(i)|INT0_SWTXQn_FIN(i)));
660 				status &= ~INT0_SWTXQn_EOF(i);
661 			}
662 		}
663 		do_ifstart = true;
664 		rv = 1;
665 	}
666 
667 	if (int4_status & INT4_SW_FREEQ_EMPTY) {
668 		struct gmac_softc * const psc = sc->sc_psc;
669 		psc->sc_rxpkts_per_sec +=
670 		    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
671 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
672 		    int4_status & INT4_SW_FREEQ_EMPTY);
673 		rv = 1;
674 	}
675 
676 	status = int1_status & sc->sc_int_mask[1];
677 	if (status & INT1_DEF_RXQ_EOF) {
678 		struct gmac_softc * const psc = sc->sc_psc;
679 		psc->sc_rxpkts_per_sec +=
680 		    gmac_hwqueue_consume(sc->sc_rxq, psc->sc_swfree_min);
681 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
682 		    status & INT1_DEF_RXQ_EOF);
683 		rv = 1;
684 	}
685 
686 	status = int4_status & sc->sc_int_enabled[4];
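	/*
	 * Most of the INT4 conditions below are only acknowledged; no
	 * further handling is done yet.
	 */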
687 	if (status & INT4_TX_FAIL) {
688 	}
689 	if (status & INT4_MIB_HEMIWRAP) {
690 	}
691 	if (status & INT4_RX_XON) {
692 	}
693 	if (status & INT4_RX_XOFF) {
694 	}
695 	if (status & INT4_TX_XON) {
696 	}
697 	if (status & INT4_TX_XOFF) {
698 	}
699 	if (status & INT4_RX_FIFO_OVRN) {
700 #if 0
701 		if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS) {
702 			sc->sc_psc->sc_swfree_min++;
703 			gmac_swfree_min_update(psc);
704 		}
705 #endif
706 		sc->sc_if.if_ierrors++;
707 	}
708 	if (status & INT4_RGMII_STSCHG) {
709 		mii_pollstat(&sc->sc_mii);
710 	}
711 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, status);
712 
713 	if (do_ifstart)
714 		gmc_ifstart(&sc->sc_if);
715 
716 	aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
717 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
718 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
719 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
720 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
721 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
722 	aprint_debug_dev(sc->sc_dev, "gmac_intr: exit rv=%d\n", rv);
723 	return rv;
724 }
725 
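/*
 * Match a GMAC port: the PHY address, port number and interrupt must be
 * sane, and the port must not already have been claimed.
 */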
726 static int
727 gmc_match(device_t parent, cfdata_t cf, void *aux)
728 {
729 	struct gmac_softc *psc = device_private(parent);
730 	struct gmac_attach_args *gma = aux;
731 
732 	if ((unsigned int)gma->gma_phy > 31)
733 		return 0;
734 	if ((unsigned int)gma->gma_port > 1)
735 		return 0;
736 	if (gma->gma_intr < 1 || gma->gma_intr > 2)
737 		return 0;
738 
739 	if (psc->sc_ports & (1 << gma->gma_port))
740 		return 0;
741 
742 	return 1;
743 }
744 
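/*
 * Attach one GMAC port: map its DMA and MAC register subregions, hook
 * up the ifnet and MII layers, and snapshot the current hardware state
 * into the softc shadow registers.
 */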
745 static void
746 gmc_attach(device_t parent, device_t self, void *aux)
747 {
748 	struct gmac_softc * const psc = device_private(parent);
749 	struct gmc_softc * const sc = device_private(self);
750 	struct gmac_attach_args *gma = aux;
751 	struct ifnet * const ifp = &sc->sc_if;
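	/* Hard-coded Ethernet addresses for port 0 and port 1. */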
752 	static const char eaddrs[2][6] = {
753 		"\x0\x52\xc3\x11\x22\x33",
754 		"\x0\x52\xc3\x44\x55\x66",
755 	};
756 
757 	psc->sc_ports |= 1 << gma->gma_port;
758 	sc->sc_port1 = (gma->gma_port == 1);
759 	sc->sc_phy = gma->gma_phy;
760 
761 	sc->sc_dev = self;
762 	sc->sc_psc = psc;
763 	sc->sc_iot = psc->sc_iot;
764 	sc->sc_ioh = psc->sc_ioh;
765 	sc->sc_dmat = psc->sc_dmat;
766 
767 	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
768 	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
769 	    &sc->sc_dma_ioh);
770 	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
771 	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
772 	    &sc->sc_gmac_ioh);
773 	aprint_normal("\n");
774 	aprint_naive("\n");
775 
776 	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
777 	ifp->if_flags = IFF_SIMPLEX|IFF_MULTICAST|IFF_BROADCAST;
778 	ifp->if_softc = sc;
779 	ifp->if_ioctl = gmc_ifioctl;
780 	ifp->if_stop  = gmc_ifstop;
781 	ifp->if_start = gmc_ifstart;
782 	ifp->if_init  = gmc_ifinit;
783 
784 	IFQ_SET_READY(&ifp->if_snd);
785 
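	/*
	 * XXX ETHERCAP_JUMBO_MTU is advertised, but gmc_ifinit() programs
	 * CONFIG0_MAXLEN_1536, so jumbo frames may not actually work.
	 */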
786 	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
787 	sc->sc_ec.ec_mii = &sc->sc_mii;
788 
789 	sc->sc_mii.mii_ifp = ifp;
790 	sc->sc_mii.mii_statchg = gmc_mii_statchg;
791 	sc->sc_mii.mii_readreg = gma->gma_mii_readreg;
792 	sc->sc_mii.mii_writereg = gma->gma_mii_writereg;
793 
794 	ifmedia_init(&sc->sc_mii.mii_media, 0, gmc_mediachange,
795 	   gmc_mediastatus);
796 
797 	if_attach(ifp);
798 	ether_ifattach(ifp, eaddrs[gma->gma_port]);
799 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
800 	    gma->gma_phy, MII_OFFSET_ANY, 0);
801 
802 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
803 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
804 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
805 	} else {
806 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
807 //		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
808 	}
809 
810 	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
811 	    GMAC_STATUS);
812 	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
813 	    GMAC_STA_ADD0);
814 	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
815 	    GMAC_STA_ADD1);
816 	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
817 	    GMAC_STA_ADD2);
818 	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
819 	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
820 	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
821 	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
822 	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
823 	    GMAC_RX_FILTER);
824 	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
825 	    GMAC_CONFIG0);
826 	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);
827 
828 	/* sc->sc_int_enabled is already zeroed */
829 	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
830 	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
831 	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
832 	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
833 	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);
834 
835 	if (!sc->sc_port1) {
836 		sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET,
837 		    IST_LEVEL_HIGH, gmc_intr, sc);
838 		KASSERT(sc->sc_ih != NULL);
839 	}
840 
841 	callout_init(&sc->sc_mii_ch, 0);
842 	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);
843 
844 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
845 	     ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
846 }
847 
848 CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
849     gmc_match, gmc_attach, NULL, NULL);
850