xref: /netbsd-src/sys/dev/ic/bcmgenet.c (revision 4f25af06542d9e24fe9af105c8cc6f3a04d61ba0)
1 /* $NetBSD: bcmgenet.c,v 1.22 2024/10/06 19:34:06 skrll Exp $ */
2 
3 /*-
4  * Copyright (c) 2020 Jared McNeill <jmcneill@invisible.ca>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * Broadcom GENETv5
31  */
32 
33 #include "opt_ddb.h"
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: bcmgenet.c,v 1.22 2024/10/06 19:34:06 skrll Exp $");
37 
38 #include <sys/param.h>
39 #include <sys/bus.h>
40 #include <sys/device.h>
41 #include <sys/intr.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/callout.h>
46 #include <sys/cprng.h>
47 
48 #include <sys/rndsource.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_ether.h>
53 #include <net/if_media.h>
54 #include <net/bpf.h>
55 
56 #include <dev/mii/miivar.h>
57 
58 #include <dev/ic/bcmgenetreg.h>
59 #include <dev/ic/bcmgenetvar.h>
60 
61 CTASSERT(MCLBYTES == 2048);
62 
63 #ifdef GENET_DEBUG
64 #define	DPRINTF(...)	printf(##__VA_ARGS__)
65 #else
66 #define	DPRINTF(...)	((void)0)
67 #endif
68 
69 #define	TX_MAX_SEGS		128
70 #define	TX_DESC_COUNT		256 /* GENET_DMA_DESC_COUNT */
71 #define	RX_DESC_COUNT		256 /* GENET_DMA_DESC_COUNT */
72 #define	MII_BUSY_RETRY		1000
73 #define	GENET_MAX_MDF_FILTER	17
74 
75 #define	TX_SKIP(n, o)		(((n) + (o)) % TX_DESC_COUNT)
76 #define	TX_NEXT(n)		TX_SKIP(n, 1)
77 #define	RX_NEXT(n)		(((n) + 1) % RX_DESC_COUNT)
78 
79 #define	GENET_LOCK(sc)			mutex_enter(&(sc)->sc_lock)
80 #define	GENET_UNLOCK(sc)		mutex_exit(&(sc)->sc_lock)
81 #define	GENET_ASSERT_LOCKED(sc)		KASSERT(mutex_owned(&(sc)->sc_lock))
82 
83 #define	GENET_TXLOCK(sc)		mutex_enter(&(sc)->sc_txlock)
84 #define	GENET_TXUNLOCK(sc)		mutex_exit(&(sc)->sc_txlock)
85 #define	GENET_ASSERT_TXLOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_txlock))
86 
87 #define	RD4(sc, reg)			\
88 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
89 #define	WR4(sc, reg, val)		\
90 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
91 
92 static int
93 genet_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
94 {
95 	struct genet_softc *sc = device_private(dev);
96 	int retry;
97 
98 	WR4(sc, GENET_MDIO_CMD,
99 	    GENET_MDIO_READ | GENET_MDIO_START_BUSY |
100 	    __SHIFTIN(phy, GENET_MDIO_PMD) |
101 	    __SHIFTIN(reg, GENET_MDIO_REG));
102 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
103 		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0) {
104 			*val = RD4(sc, GENET_MDIO_CMD) & 0xffff;
105 			break;
106 		}
107 		delay(10);
108 	}
109 
110 	if (retry == 0) {
111 		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
112 		    phy, reg);
113 		return ETIMEDOUT;
114 	}
115 
116 	return 0;
117 }
118 
119 static int
120 genet_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
121 {
122 	struct genet_softc *sc = device_private(dev);
123 	int retry;
124 
125 	WR4(sc, GENET_MDIO_CMD,
126 	    val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
127 	    __SHIFTIN(phy, GENET_MDIO_PMD) |
128 	    __SHIFTIN(reg, GENET_MDIO_REG));
129 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
130 		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
131 			break;
132 		delay(10);
133 	}
134 
135 	if (retry == 0) {
136 		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
137 		    phy, reg);
138 		return ETIMEDOUT;
139 	}
140 
141 	return 0;
142 }
143 
144 static void
145 genet_update_link(struct genet_softc *sc)
146 {
147 	struct mii_data *mii = &sc->sc_mii;
148 	uint32_t val;
149 	u_int speed;
150 
151 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
152 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
153 		speed = GENET_UMAC_CMD_SPEED_1000;
154 	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
155 		speed = GENET_UMAC_CMD_SPEED_100;
156 	else
157 		speed = GENET_UMAC_CMD_SPEED_10;
158 
159 	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
160 	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
161 	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
162 	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
163 	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
164 		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
165 	else
166 		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
167 	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);
168 
169 	val = RD4(sc, GENET_UMAC_CMD);
170 	val &= ~GENET_UMAC_CMD_SPEED;
171 	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
172 	WR4(sc, GENET_UMAC_CMD, val);
173 }
174 
175 static void
176 genet_mii_statchg(struct ifnet *ifp)
177 {
178 	struct genet_softc * const sc = ifp->if_softc;
179 
180 	genet_update_link(sc);
181 }
182 
183 static void
184 genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
185     bus_addr_t paddr, u_int len)
186 {
187 	uint32_t status;
188 
189 	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);
190 
191 	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
192 	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
193 	WR4(sc, GENET_TX_DESC_STATUS(index), status);
194 }
195 
/*
 * Map mbuf chain "m" for transmit and fill one TX descriptor per DMA
 * segment, starting at ring slot "index".
 *
 * Returns:
 *   > 0 : number of descriptors consumed (segments)
 *   0   : transient mapping failure, caller should retry later
 *   -1  : not enough free descriptors, caller should wait for TX
 *         completions before retrying
 *   -2  : packet cannot be sent (too many segments), caller drops it
 *
 * On success, ownership of "m" transfers to the ring (freed later by
 * genet_free_txbuf()); on any failure the caller keeps ownership.
 */
static int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;
	bool nospace;

	/* at least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return -1;

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* More segments than the map allows: unsendable, drop. */
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		return -2;
	}
	if (error != 0) {
		/* Transient failure; leave the packet on the send queue. */
		device_printf(sc->sc_dev,
		    "TX packet cannot be mapped, retried...\n");
		return 0;
	}

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	/* One descriptor is needed per segment. */
	nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs;
	if (nospace) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		/* XXX coalesce and retry ? */
		return -1;
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* stored in same index as loaded map */
	sc->sc_tx.buf_map[index].mbuf = m;

	/* First descriptor gets SOP; hardware appends the FCS (CRC). */
	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

	for (cur = index, i = 0; i < nsegs; i++) {
		/* Last descriptor gets EOP. */
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		if (i == 0)
			flags &= ~GENET_TX_DESC_STATUS_SOP;
		cur = TX_NEXT(cur);
	}

	return nsegs;
}
256 
257 static void
258 genet_setup_rxdesc(struct genet_softc *sc, int index,
259     bus_addr_t paddr, bus_size_t len)
260 {
261 	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
262 	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
263 }
264 
265 static int
266 genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
267 {
268 	int error;
269 
270 	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
271 	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
272 	if (error != 0)
273 		return error;
274 
275 	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
276 	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
277 	    BUS_DMASYNC_PREREAD);
278 
279 	sc->sc_rx.buf_map[index].mbuf = m;
280 	genet_setup_rxdesc(sc, index,
281 	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
282 	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);
283 
284 	return 0;
285 }
286 
287 static struct mbuf *
288 genet_alloc_mbufcl(struct genet_softc *sc)
289 {
290 	struct mbuf *m;
291 
292 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
293 	if (m != NULL)
294 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
295 
296 	return m;
297 }
298 
/*
 * Unmask the TX and RX DMA completion interrupts (writing the INTRL2
 * clear-mask register enables the corresponding sources).
 */
static void
genet_enable_intr(struct genet_softc *sc)
{
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}
305 
/*
 * Mask every INTRL2 interrupt source and acknowledge anything pending.
 */
static void
genet_disable_intr(struct genet_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
}
313 
314 static void
315 genet_tick(void *softc)
316 {
317 	struct genet_softc *sc = softc;
318 	struct mii_data *mii = &sc->sc_mii;
319 
320 	GENET_LOCK(sc);
321 	mii_tick(mii);
322 	if ((sc->sc_if_flags & IFF_RUNNING) != 0)
323 		callout_schedule(&sc->sc_stat_ch, hz);
324 	GENET_UNLOCK(sc);
325 }
326 
327 static void
328 genet_setup_rxfilter_mdf(struct genet_softc *sc, u_int n, const uint8_t *ea)
329 {
330 	uint32_t addr0 = (ea[0] << 8) | ea[1];
331 	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];
332 
333 	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
334 	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
335 }
336 
337 static void
338 genet_setup_rxfilter(struct genet_softc *sc)
339 {
340 	struct ethercom *ec = &sc->sc_ec;
341 	struct ifnet *ifp = &ec->ec_if;
342 	struct ether_multistep step;
343 	struct ether_multi *enm;
344 	uint32_t cmd, mdf_ctrl;
345 	u_int n;
346 
347 	GENET_ASSERT_LOCKED(sc);
348 
349 	ETHER_LOCK(ec);
350 
351 	cmd = RD4(sc, GENET_UMAC_CMD);
352 
353 	/*
354 	 * Count the required number of hardware filters. We need one
355 	 * for each multicast address, plus one for our own address and
356 	 * the broadcast address.
357 	 */
358 	ETHER_FIRST_MULTI(step, ec, enm);
359 	for (n = 2; enm != NULL; n++)
360 		ETHER_NEXT_MULTI(step, enm);
361 
362 	if (n > GENET_MAX_MDF_FILTER)
363 		ec->ec_flags |= ETHER_F_ALLMULTI;
364 	else
365 		ec->ec_flags &= ~ETHER_F_ALLMULTI;
366 
367 	if ((sc->sc_if_flags & IFF_PROMISC) != 0) {
368 		ec->ec_flags |= ETHER_F_ALLMULTI;
369 		cmd |= GENET_UMAC_CMD_PROMISC;
370 		mdf_ctrl = 0;
371 	} else {
372 		cmd &= ~GENET_UMAC_CMD_PROMISC;
373 		genet_setup_rxfilter_mdf(sc, 0, ifp->if_broadcastaddr);
374 		genet_setup_rxfilter_mdf(sc, 1, CLLADDR(ifp->if_sadl));
375 		ETHER_FIRST_MULTI(step, ec, enm);
376 		for (n = 2; enm != NULL; n++) {
377 			genet_setup_rxfilter_mdf(sc, n, enm->enm_addrlo);
378 			ETHER_NEXT_MULTI(step, enm);
379 		}
380 		mdf_ctrl = __BITS(GENET_MAX_MDF_FILTER - 1,
381 				  GENET_MAX_MDF_FILTER - n);
382 	}
383 
384 	WR4(sc, GENET_UMAC_CMD, cmd);
385 	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
386 
387 	ETHER_UNLOCK(ec);
388 }
389 
/*
 * Soft-reset the MAC core: pulse the RX buffer flush reset, reset the
 * UniMAC and its MIB counters, then restore default buffer settings.
 * Always returns 0.
 */
static int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

	/* Pulse the RX buffer flush reset bit, then clear it. */
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	/* Software-reset the UniMAC (loopback bit is cleared again below). */
	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	/* Clear the MIB statistics counters. */
	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	/* Maximum frame length: 1518 + VLAN headroom. */
	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	/* 2-byte RX payload alignment (see ETHER_ALIGN use in rxintr). */
	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}
427 
428 static void
429 genet_set_rxthresh(struct genet_softc *sc, int qid, int usecs, int count)
430 {
431 	int ticks;
432 	uint32_t val;
433 
434 	/* convert to 125MHz/1024 ticks */
435 	ticks = howmany(usecs * 125, 1024);
436 
437 	if (count < 1)
438 		count = 1;
439 	if (count > GENET_INTR_THRESHOLD_MASK)
440 		count = GENET_INTR_THRESHOLD_MASK;
441 	if (ticks < 0)
442 		ticks = 0;
443 	if (ticks > GENET_DMA_RING_TIMEOUT_MASK)
444 		ticks = GENET_DMA_RING_TIMEOUT_MASK;
445 
446 	WR4(sc, GENET_RX_DMA_MBUF_DONE_THRES(qid), count);
447 
448 	val = RD4(sc, GENET_RX_DMA_RING_TIMEOUT(qid));
449 	val &= ~GENET_DMA_RING_TIMEOUT_MASK;
450 	val |= ticks;
451 	WR4(sc, GENET_RX_DMA_RING_TIMEOUT(qid), val);
452 }
453 
454 static void
455 genet_set_txthresh(struct genet_softc *sc, int qid, int count)
456 {
457 	if (count < 1)
458 		count = 1;
459 	if (count > GENET_INTR_THRESHOLD_MASK)
460 		count = GENET_INTR_THRESHOLD_MASK;
461 
462 	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), count);
463 }
464 
/*
 * Program the TX and RX DMA rings for queue "qid" and enable both DMA
 * engines. The software ring state (queued/cidx/pidx) is reset to
 * match the freshly-zeroed hardware indices.
 */
static void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	/* Zero all pointers/indices, then set ring geometry. */
	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	/* End offset of the descriptor area, in 32-bit words. */
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	/* interrupt after 10 packets or when ring empty */
	genet_set_txthresh(sc, qid, 10);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.cidx = sc->sc_rx.pidx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	/* Flow-control (pause) on/off thresholds. */
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	/*
	 * interrupt mitigation: threshold of 10 packets,
	 * timeout of 57 us (~84 minimal packets at 1Gbit/s)
	 */
	genet_set_rxthresh(sc, qid, 57, 10);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
542 
/*
 * Bring the interface up: select the port mode, program the MAC
 * address, set up the RX filter and DMA rings, enable the MAC and
 * interrupts, and kick the PHY. Caller must hold both sc_lock and
 * sc_txlock. Returns 0 (also when the interface is already running).
 */
static int
genet_init_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);

	GENET_ASSERT_LOCKED(sc);
	GENET_ASSERT_TXLOCKED(sc);

	/* Nothing to do if the interface is already up. */
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	/* RGMII variants use the external GPHY port. */
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_TXID)
		WR4(sc, GENET_SYS_PORT_CTRL,
		    GENET_SYS_PORT_MODE_EXT_GPHY);
	else
		WR4(sc, GENET_SYS_PORT_CTRL, 0);

	/* Write hardware address: MAC0 holds bytes 0-3, MAC1 bytes 4-5. */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);

	/* Setup RX filter */
	sc->sc_if_flags = ifp->if_flags;
	genet_setup_rxfilter(sc);

	/* Setup TX/RX rings */
	genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	genet_enable_intr(sc);

	/* Allow genet_start_locked() to queue packets. */
	GENET_ASSERT_TXLOCKED(sc);
	sc->sc_txrunning = true;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags |= IFF_RUNNING;

	/* Start the PHY and the periodic tick. */
	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}
600 
601 static int
602 genet_init(struct ifnet *ifp)
603 {
604 	struct genet_softc *sc = ifp->if_softc;
605 	int error;
606 
607 	GENET_LOCK(sc);
608 	GENET_TXLOCK(sc);
609 	error = genet_init_locked(sc);
610 	GENET_TXUNLOCK(sc);
611 	GENET_UNLOCK(sc);
612 
613 	return error;
614 }
615 
616 static int
617 genet_free_txbuf(struct genet_softc *sc, int index)
618 {
619 	struct genet_bufmap *bmap;
620 
621 	bmap = &sc->sc_tx.buf_map[index];
622 	if (bmap->mbuf == NULL)
623 		return 0;
624 
625 	if (bmap->map->dm_mapsize > 0) {
626 		bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
627 		    0, bmap->map->dm_mapsize,
628 		    BUS_DMASYNC_POSTWRITE);
629 	}
630 	bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
631 	m_freem(bmap->mbuf);
632 	bmap->mbuf = NULL;
633 
634 	return 1;
635 }
636 
/*
 * Bring the interface down: stop the tick callout and PHY, quiesce the
 * MAC receiver, both DMA engines and the transmitter, flush the TX
 * FIFO, mask interrupts and release all pending TX mbufs. Caller must
 * hold sc_lock.
 * NOTE(review): the "disable" argument is currently unused.
 */
static void
genet_stop_locked(struct genet_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;
	int i;

	GENET_ASSERT_LOCKED(sc);

	/* Block new transmissions before tearing the ring down. */
	GENET_TXLOCK(sc);
	sc->sc_txrunning = false;
	GENET_TXUNLOCK(sc);

	/* callout_halt() may drop and retake sc_lock while waiting. */
	callout_halt(&sc->sc_stat_ch, &sc->sc_lock);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	genet_disable_intr(sc);

	/* Free TX buffers */
	for (i=0; i<TX_DESC_COUNT; ++i)
		genet_free_txbuf(sc, i);

	sc->sc_if_flags &= ~IFF_RUNNING;
	ifp->if_flags &= ~IFF_RUNNING;
}
689 
690 static void
691 genet_stop(struct ifnet *ifp, int disable)
692 {
693 	struct genet_softc * const sc = ifp->if_softc;
694 
695 	GENET_LOCK(sc);
696 	genet_stop_locked(sc, disable);
697 	GENET_UNLOCK(sc);
698 }
699 
/*
 * RX completion: drain every descriptor the hardware has produced
 * since the last call, replacing each received cluster with a fresh
 * one before passing the packet up the stack. Called with sc_lock
 * held from genet_intr().
 */
static void
genet_rxintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error, index, len, n;
	struct mbuf *m, *m0;
	uint32_t status, pidx, total;
	int pkts = 0;

	/* New descriptor count, modulo the 16-bit hardware index space. */
	pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
	total = (pidx - sc->sc_rx.cidx) & 0xffff;

	DPRINTF("RX pidx=%08x total=%d\n", pidx, total);

	index = sc->sc_rx.cidx % RX_DESC_COUNT;
	for (n = 0; n < total; n++) {
		status = RD4(sc, GENET_RX_DESC_STATUS(index));

		/* Report and count hardware receive errors, drop the frame. */
		if (status & GENET_RX_DESC_STATUS_ALL_ERRS) {
			if (status & GENET_RX_DESC_STATUS_OVRUN_ERR)
				device_printf(sc->sc_dev, "overrun\n");
			if (status & GENET_RX_DESC_STATUS_CRC_ERR)
				device_printf(sc->sc_dev, "CRC error\n");
			if (status & GENET_RX_DESC_STATUS_RX_ERR)
				device_printf(sc->sc_dev, "receive error\n");
			if (status & GENET_RX_DESC_STATUS_FRAME_ERR)
				device_printf(sc->sc_dev, "frame error\n");
			if (status & GENET_RX_DESC_STATUS_LEN_ERR)
				device_printf(sc->sc_dev, "length error\n");
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		/* The hardware should have released this descriptor. */
		if (status & GENET_RX_DESC_STATUS_OWN)
			device_printf(sc->sc_dev, "OWN %d of %d\n",n,total);

		len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);
		if (len < ETHER_ALIGN) {
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		m = sc->sc_rx.buf_map[index].mbuf;

		/* Allocate the replacement first; drop the frame if we can't. */
		if ((m0 = genet_alloc_mbufcl(sc)) == NULL) {
			if_statinc(ifp, if_ierrors);
			goto next;
		}
		MCLAIM(m0, &sc->sc_ec.ec_rx_mowner);

		/* unload map before it gets loaded in setup_rxbuf */
		if (sc->sc_rx.buf_map[index].map->dm_mapsize > 0) {
			bus_dmamap_sync(sc->sc_rx.buf_tag,
			    sc->sc_rx.buf_map[index].map,
			    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
		}
		bus_dmamap_unload(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map);
		sc->sc_rx.buf_map[index].mbuf = NULL;

		error = genet_setup_rxbuf(sc, index, m0);
		if (error != 0) {
			m_freem(m0);
			if_statinc(ifp, if_ierrors);

			/* XXX mbuf is unloaded but load failed */
			m_freem(m);
			device_printf(sc->sc_dev,
			    "cannot load RX mbuf. panic?\n");
			goto next;
		}

		DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
		    n, index, status, len, len - ETHER_ALIGN);

		/* Strip the 2-byte alignment padding and pass the frame up. */
		m_set_rcvif(m, ifp);
		m->m_len = m->m_pkthdr.len = len;
		m_adj(m, ETHER_ALIGN);

		if_percpuq_enqueue(ifp->if_percpuq, m);
		++pkts;

next:
		index = RX_NEXT(index);

		/* Acknowledge this descriptor back to the hardware. */
		sc->sc_rx.cidx = (sc->sc_rx.cidx + 1) & 0xffff;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	}

	if (pkts != 0)
		rnd_add_uint32(&sc->sc_rndsource, pkts);
}
792 
793 static void
794 genet_txintr(struct genet_softc *sc, int qid)
795 {
796 	struct ifnet *ifp = &sc->sc_ec.ec_if;
797 	int cidx, i, pkts = 0;
798 
799 	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
800 	i = sc->sc_tx.cidx % TX_DESC_COUNT;
801 	while (sc->sc_tx.cidx != cidx) {
802 		pkts += genet_free_txbuf(sc, i);
803 		i = TX_NEXT(i);
804 		sc->sc_tx.cidx = (sc->sc_tx.cidx + 1) & 0xffff;
805 	}
806 
807 	if (pkts != 0) {
808 		if_statadd(ifp, if_opackets, pkts);
809 		rnd_add_uint32(&sc->sc_rndsource, pkts);
810 	}
811 
812 	if_schedule_deferred_start(ifp);
813 }
814 
/*
 * Dequeue packets from the interface send queue, map them onto the TX
 * ring, and finally publish the new producer index to the hardware in
 * a single register write. Caller must hold sc_txlock.
 */
static void
genet_start_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int nsegs, index, cnt;

	GENET_ASSERT_TXLOCKED(sc);

	if (!sc->sc_txrunning)
		return;

	const int qid = GENET_DMA_DEFAULT_QUEUE;

	index = sc->sc_tx.pidx % TX_DESC_COUNT;
	cnt = 0;

	/* Recompute ring occupancy from the hardware producer index. */
	sc->sc_tx.queued = (RD4(sc, GENET_TX_DMA_PROD_INDEX(qid))
	          - sc->sc_tx.cidx) & 0xffff;

	/* At least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return;

	for (;;) {
		/* Peek first; only dequeue once the packet is mapped. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * nsegs > 0: mapped, consume it. -2: unsendable, drop.
		 * -1 or 0: ring full or transient failure -- leave the
		 * packet queued and stop for now.
		 */
		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs <= 0) {
			if (nsegs == -2) {
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			}
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		index = TX_SKIP(index, nsegs);
		sc->sc_tx.queued += nsegs;
		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	/* Kick the DMA engine with the new producer index. */
	if (cnt != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
}
866 
867 static void
868 genet_start(struct ifnet *ifp)
869 {
870 	struct genet_softc *sc = ifp->if_softc;
871 
872 	GENET_TXLOCK(sc);
873 	genet_start_locked(sc);
874 	GENET_TXUNLOCK(sc);
875 }
876 
877 int
878 genet_intr(void *arg)
879 {
880 	struct genet_softc *sc = arg;
881 	uint32_t val;
882 
883 	val = RD4(sc, GENET_INTRL2_CPU_STAT);
884 	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
885 	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);
886 
887 	if (val & GENET_IRQ_RXDMA_DONE) {
888 		GENET_LOCK(sc);
889 		genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);
890 		GENET_UNLOCK(sc);
891 	}
892 
893 	if (val & GENET_IRQ_TXDMA_DONE) {
894 		genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
895 	}
896 
897 	return 1;
898 }
899 
/*
 * Interface ioctl handler.
 *
 * SIOCADDMULTI/SIOCDELMULTI may arrive without the interface lock;
 * everything else is expected to hold it. When ether_ioctl() returns
 * ENETRESET, apply the change directly: reinitialize the interface for
 * SIOCSIFCAP, otherwise just reprogram the RX filter while running.
 */
static int
genet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct genet_softc *sc = ifp->if_softc;
	int error;

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		KASSERT(IFNET_LOCKED(ifp));
	}

	const int s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	splx(s);

	if (error != ENETRESET)
		return error;

	error = 0;

	if (cmd == SIOCSIFCAP)
		error = if_init(ifp);
	else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
		GENET_LOCK(sc);
		if ((sc->sc_if_flags & IFF_RUNNING) != 0)
			genet_setup_rxfilter(sc);
		GENET_UNLOCK(sc);
	}
	return error;
}
933 
934 static int
935 genet_ifflags_cb(struct ethercom *ec)
936 {
937 	struct ifnet * const ifp = &ec->ec_if;
938 	struct genet_softc * const sc = ifp->if_softc;
939 	int ret = 0;
940 
941 	KASSERT(IFNET_LOCKED(ifp));
942 	GENET_LOCK(sc);
943 
944 	u_short change = ifp->if_flags ^ sc->sc_if_flags;
945 	sc->sc_if_flags = ifp->if_flags;
946 
947 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
948 		ret = ENETRESET;
949 	} else if ((change & IFF_PROMISC) != 0) {
950 		if ((sc->sc_if_flags & IFF_RUNNING) != 0)
951 			genet_setup_rxfilter(sc);
952 	}
953 	GENET_UNLOCK(sc);
954 
955 	return ret;
956 }
957 
958 static void
959 genet_get_eaddr(struct genet_softc *sc, uint8_t *eaddr)
960 {
961 	prop_dictionary_t prop = device_properties(sc->sc_dev);
962 	uint32_t maclo, machi, val;
963 	prop_data_t eaprop;
964 
965 	eaprop = prop_dictionary_get(prop, "mac-address");
966 	if (eaprop != NULL) {
967 		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
968 		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
969 		memcpy(eaddr, prop_data_value(eaprop),
970 		    ETHER_ADDR_LEN);
971 		return;
972 	}
973 
974 	maclo = machi = 0;
975 
976 	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
977 	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
978 		maclo = RD4(sc, GENET_UMAC_MAC0);
979 		machi = RD4(sc, GENET_UMAC_MAC1) & 0xffff;
980 	}
981 
982 	if (maclo == 0 && machi == 0) {
983 		/* Create one */
984 		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
985 		machi = cprng_strong32() & 0xffff;
986 	}
987 
988 	eaddr[0] = (maclo >> 24) & 0xff;
989 	eaddr[1] = (maclo >> 16) & 0xff;
990 	eaddr[2] = (maclo >>  8) & 0xff;
991 	eaddr[3] = (maclo >>  0) & 0xff;
992 	eaddr[4] = (machi >>  8) & 0xff;
993 	eaddr[5] = (machi >>  0) & 0xff;
994 }
995 
996 static int
997 genet_setup_dma(struct genet_softc *sc, int qid)
998 {
999 	struct mbuf *m;
1000 	int error, i;
1001 
1002 	/* Setup TX ring */
1003 	sc->sc_tx.buf_tag = sc->sc_dmat;
1004 	for (i = 0; i < TX_DESC_COUNT; i++) {
1005 		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
1006 		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
1007 		    &sc->sc_tx.buf_map[i].map);
1008 		if (error != 0) {
1009 			device_printf(sc->sc_dev,
1010 			    "cannot create TX buffer map\n");
1011 			return error;
1012 		}
1013 	}
1014 
1015 	/* Setup RX ring */
1016 	sc->sc_rx.buf_tag = sc->sc_dmat;
1017 	for (i = 0; i < RX_DESC_COUNT; i++) {
1018 		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
1019 		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
1020 		    &sc->sc_rx.buf_map[i].map);
1021 		if (error != 0) {
1022 			device_printf(sc->sc_dev,
1023 			    "cannot create RX buffer map\n");
1024 			return error;
1025 		}
1026 		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
1027 			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
1028 			return ENOMEM;
1029 		}
1030 		error = genet_setup_rxbuf(sc, i, m);
1031 		if (error != 0) {
1032 			device_printf(sc->sc_dev, "cannot create RX buffer\n");
1033 			return error;
1034 		}
1035 	}
1036 
1037 	return 0;
1038 }
1039 
1040 static void
1041 genet_claim_rxring(struct genet_softc *sc, int qid)
1042 {
1043 	struct mbuf *m;
1044 	int i;
1045 
1046 	/* Claim mbufs from RX ring */
1047 	for (i = 0; i < RX_DESC_COUNT; i++) {
1048 		m = sc->sc_rx.buf_map[i].mbuf;
1049 		if (m != NULL) {
1050 			MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
1051 		}
1052 	}
1053 }
1054 
/*
 * Common attach routine (bus front-ends fill in sc first): verify the
 * GENET revision, reset the MAC, allocate DMA resources, then attach
 * the ethernet interface, MII bus and entropy source. Returns 0 on
 * success or an errno on failure.
 */
int
genet_attach(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int maj, min;
	int mii_flags = 0;

	/*
	 * Decode the hardware revision. NOTE(review): the register
	 * appears to encode v1 as 0 and v5/v6 one higher than the
	 * reported version -- confirm against the GENET documentation.
	 */
	const uint32_t rev = RD4(sc, GENET_SYS_REV_CTRL);
	min = __SHIFTOUT(rev, SYS_REV_MINOR);
	maj = __SHIFTOUT(rev, SYS_REV_MAJOR);
	if (maj == 0)
		maj++;
	else if (maj == 5 || maj == 6)
		maj--;

	/* This driver only supports GENETv5. */
	if (maj != 5) {
		aprint_error(": GENETv%d.%d not supported\n", maj, min);
		return ENXIO;
	}

	/* Translate the RGMII delay mode into MII flags for the PHY. */
	switch (sc->sc_phy_mode) {
	case GENET_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_RXID;
		break;
	case GENET_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_RXID | MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII:
	default:
		break;
	}

	aprint_naive("\n");
	aprint_normal(": GENETv%d.%d\n", maj, min);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_stat_ch, genet_tick, sc);

	genet_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	genet_reset(sc);

	/* Setup DMA descriptors */
	if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_start = genet_start;
	ifp->if_ioctl = genet_ioctl;
	ifp->if_init = genet_init;
	ifp->if_stop = genet_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = genet_mii_readreg;
	mii->mii_writereg = genet_mii_writereg;
	mii->mii_statchg = genet_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->sc_ec, genet_ifflags_cb);


	/* MBUFTRACE */
	genet_claim_rxring(sc, GENET_DMA_DEFAULT_QUEUE);

	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	return 0;
}
1162 
1163 #ifdef DDB
1164 void	genet_debug(void);
1165 
/*
 * DDB-callable helper: print the software and hardware producer and
 * consumer indices of the default queue for the "genet0" instance.
 */
void
genet_debug(void)
{
	device_t dev = device_find_by_xname("genet0");
	if (dev == NULL)
		return;

	struct genet_softc * const sc = device_private(dev);
	const int qid = GENET_DMA_DEFAULT_QUEUE;

	printf("TX CIDX = %08x (soft)\n", sc->sc_tx.cidx);
	printf("TX CIDX = %08x\n", RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)));
	printf("TX PIDX = %08x (soft)\n", sc->sc_tx.pidx);
	printf("TX PIDX = %08x\n", RD4(sc, GENET_TX_DMA_PROD_INDEX(qid)));

	printf("RX CIDX = %08x (soft)\n", sc->sc_rx.cidx);
	printf("RX CIDX = %08x\n", RD4(sc, GENET_RX_DMA_CONS_INDEX(qid)));
	printf("RX PIDX = %08x (soft)\n", sc->sc_rx.pidx);
	printf("RX PIDX = %08x\n", RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)));
}
1186 #endif
1187