/* $NetBSD: bcmgenet.c,v 1.20 2024/09/15 07:38:08 skrll Exp $ */

/*-
 * Copyright (c) 2020 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Broadcom GENETv5
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bcmgenet.c,v 1.20 2024/09/15 07:38:08 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/cprng.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <dev/ic/bcmgenetreg.h>
#include <dev/ic/bcmgenetvar.h>

CTASSERT(MCLBYTES == 2048);

#ifdef GENET_DEBUG
#define	DPRINTF(...)	printf(__VA_ARGS__)
#else
#define	DPRINTF(...)	((void)0)
#endif

#define	TX_MAX_SEGS		128
#define	TX_DESC_COUNT		256 /* GENET_DMA_DESC_COUNT */
#define	RX_DESC_COUNT		256 /* GENET_DMA_DESC_COUNT */
#define	MII_BUSY_RETRY		1000
#define	GENET_MAX_MDF_FILTER	17

#define	TX_SKIP(n, o)		(((n) + (o)) % TX_DESC_COUNT)
#define	TX_NEXT(n)		TX_SKIP(n, 1)
#define	RX_NEXT(n)		(((n) + 1) % RX_DESC_COUNT)

#define	GENET_LOCK(sc)			mutex_enter(&(sc)->sc_lock)
#define	GENET_UNLOCK(sc)		mutex_exit(&(sc)->sc_lock)
#define	GENET_ASSERT_LOCKED(sc)		KASSERT(mutex_owned(&(sc)->sc_lock))

#define	GENET_TXLOCK(sc)		mutex_enter(&(sc)->sc_txlock)
#define	GENET_TXUNLOCK(sc)		mutex_exit(&(sc)->sc_txlock)
#define	GENET_ASSERT_TXLOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_txlock))

#define	RD4(sc, reg)			\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define	WR4(sc, reg, val)		\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

static int
genet_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct genet_softc *sc = device_private(dev);
	int retry;

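	/*
	 * Start the MDIO read, then poll for GENET_MDIO_START_BUSY to
	 * clear; on completion the result is returned in the low 16
	 * bits of the same command register.
	 */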
	WR4(sc, GENET_MDIO_CMD,
	    GENET_MDIO_READ | GENET_MDIO_START_BUSY |
	    __SHIFTIN(phy, GENET_MDIO_PMD) |
	    __SHIFTIN(reg, GENET_MDIO_REG));
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0) {
			*val = RD4(sc, GENET_MDIO_CMD) & 0xffff;
			break;
		}
		delay(10);
	}

	if (retry == 0) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static int
genet_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct genet_softc *sc = device_private(dev);
	int retry;

	WR4(sc, GENET_MDIO_CMD,
	    val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
	    __SHIFTIN(phy, GENET_MDIO_PMD) |
	    __SHIFTIN(reg, GENET_MDIO_REG));
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
			break;
		delay(10);
	}

	if (retry == 0) {
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
genet_update_link(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	u_int speed;

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		speed = GENET_UMAC_CMD_SPEED_1000;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		speed = GENET_UMAC_CMD_SPEED_100;
	else
		speed = GENET_UMAC_CMD_SPEED_10;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
genet_mii_statchg(struct ifnet *ifp)
{
	struct genet_softc * const sc = ifp->if_softc;

	genet_update_link(sc);
}

static void
genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len)
{
	uint32_t status;

	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);

	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
	WR4(sc, GENET_TX_DESC_STATUS(index), status);
}

static int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;
	bool nospace;

	/* at least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return -1;

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		return -2;
	}
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "TX packet cannot be mapped, retrying...\n");
		return 0;
	}

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs;
	if (nospace) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		/* XXX coalesce and retry ? */
		return -1;
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* stored in same index as loaded map */
	sc->sc_tx.buf_map[index].mbuf = m;

	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

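	/*
	 * Assign one descriptor per DMA segment: only the first
	 * descriptor carries SOP (cleared after the first iteration)
	 * and only the last carries EOP, so the hardware treats the
	 * segments as a single frame.
	 */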
	for (cur = index, i = 0; i < nsegs; i++) {
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		if (i == 0)
			flags &= ~GENET_TX_DESC_STATUS_SOP;
		cur = TX_NEXT(cur);
	}

	return nsegs;
}

static void
genet_setup_rxdesc(struct genet_softc *sc, int index,
    bus_addr_t paddr, bus_size_t len)
{
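	/*
	 * Only the buffer address is programmed per descriptor; the RX
	 * buffer length is taken from the ring-wide setting written to
	 * GENET_RX_DMA_RING_BUF_SIZE (see genet_init_rings()), so the
	 * len argument is unused here.
	 */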
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
}

static int
genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;
	genet_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);

	return 0;
}

static struct mbuf *
genet_alloc_mbufcl(struct genet_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return m;
}

static void
genet_enable_intr(struct genet_softc *sc)
{
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
genet_disable_intr(struct genet_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
}

static void
genet_tick(void *softc)
{
	struct genet_softc *sc = softc;
	struct mii_data *mii = &sc->sc_mii;

	GENET_LOCK(sc);
	mii_tick(mii);
	if (sc->sc_running)
		callout_schedule(&sc->sc_stat_ch, hz);
	GENET_UNLOCK(sc);
}

static void
genet_setup_rxfilter_mdf(struct genet_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static void
genet_setup_rxfilter(struct genet_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GENET_ASSERT_LOCKED(sc);

	ETHER_LOCK(ec);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	for (n = 2; enm != NULL; n++)
		ETHER_NEXT_MULTI(step, enm);

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		genet_setup_rxfilter_mdf(sc, 0, ifp->if_broadcastaddr);
		genet_setup_rxfilter_mdf(sc, 1, CLLADDR(ifp->if_sadl));
		ETHER_FIRST_MULTI(step, ec, enm);
		for (n = 2; enm != NULL; n++) {
			genet_setup_rxfilter_mdf(sc, n, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}
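		/*
		 * MDF enable bits are allocated from the most
		 * significant bit downwards, so filter slots 0..n-1
		 * map to bits (GENET_MAX_MDF_FILTER - 1) through
		 * (GENET_MAX_MDF_FILTER - n).
		 */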
		mdf_ctrl = __BITS(GENET_MAX_MDF_FILTER - 1,
				  GENET_MAX_MDF_FILTER - n);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);

	ETHER_UNLOCK(ec);
}

static int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

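	/*
	 * Reset sequence: flush and reset the receive buffer block,
	 * soft-reset the UniMAC (with local loopback enabled while the
	 * reset bit is held), then clear the MIB counters.
	 */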
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}

static void
genet_set_rxthresh(struct genet_softc *sc, int qid, int usecs, int count)
{
	int ticks;
	uint32_t val;

	/* convert to 125MHz/1024 ticks */
	ticks = howmany(usecs * 125, 1024);
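	/*
	 * For example, the 57 us default used by genet_init_rings()
	 * becomes howmany(57 * 125, 1024) = 7 ticks.
	 */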

	if (count < 1)
		count = 1;
	if (count > GENET_INTR_THRESHOLD_MASK)
		count = GENET_INTR_THRESHOLD_MASK;
	if (ticks < 0)
		ticks = 0;
	if (ticks > GENET_DMA_RING_TIMEOUT_MASK)
		ticks = GENET_DMA_RING_TIMEOUT_MASK;

	WR4(sc, GENET_RX_DMA_MBUF_DONE_THRES(qid), count);

	val = RD4(sc, GENET_RX_DMA_RING_TIMEOUT(qid));
	val &= ~GENET_DMA_RING_TIMEOUT_MASK;
	val |= ticks;
	WR4(sc, GENET_RX_DMA_RING_TIMEOUT(qid), val);
}

static void
genet_set_txthresh(struct genet_softc *sc, int qid, int count)
{
	if (count < 1)
		count = 1;
	if (count > GENET_INTR_THRESHOLD_MASK)
		count = GENET_INTR_THRESHOLD_MASK;

	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), count);
}

static void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
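	/*
	 * Ring start/end addresses are expressed in 32-bit words
	 * within the descriptor region, hence the division by 4 in the
	 * END_ADDR computation below.
	 */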
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	/* interrupt after 10 packets or when ring empty */
	genet_set_txthresh(sc, qid, 10);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.cidx = sc->sc_rx.pidx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	/*
	 * Interrupt after 10 received packets, or after a 57 us
	 * mitigation timeout (~84 minimum-sized packets at 1Gbit/s).
	 */
	genet_set_rxthresh(sc, qid, 57, 10);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
genet_init_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);

	GENET_ASSERT_LOCKED(sc);
	GENET_ASSERT_TXLOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_TXID)
		WR4(sc, GENET_SYS_PORT_CTRL,
		    GENET_SYS_PORT_MODE_EXT_GPHY);
	else
		WR4(sc, GENET_SYS_PORT_CTRL, 0);

	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);

	/* Setup RX filter */
	sc->sc_promisc = ifp->if_flags & IFF_PROMISC;
	genet_setup_rxfilter(sc);

	/* Setup TX/RX rings */
	genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	genet_enable_intr(sc);

	GENET_ASSERT_TXLOCKED(sc);
	sc->sc_txrunning = true;

	sc->sc_running = true;
	ifp->if_flags |= IFF_RUNNING;

	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}

static int
genet_init(struct ifnet *ifp)
{
	struct genet_softc *sc = ifp->if_softc;
	int error;

	GENET_LOCK(sc);
	GENET_TXLOCK(sc);
	error = genet_init_locked(sc);
	GENET_TXUNLOCK(sc);
	GENET_UNLOCK(sc);

	return error;
}

static int
genet_free_txbuf(struct genet_softc *sc, int index)
{
	struct genet_bufmap *bmap;

	bmap = &sc->sc_tx.buf_map[index];
	if (bmap->mbuf == NULL)
		return 0;

	if (bmap->map->dm_mapsize > 0) {
		bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
		    0, bmap->map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
	m_freem(bmap->mbuf);
	bmap->mbuf = NULL;

	return 1;
}

static void
genet_stop_locked(struct genet_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;
	int i;

	GENET_ASSERT_LOCKED(sc);

	GENET_TXLOCK(sc);
	sc->sc_txrunning = false;
	GENET_TXUNLOCK(sc);

	sc->sc_running = false;
	callout_halt(&sc->sc_stat_ch, &sc->sc_lock);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	genet_disable_intr(sc);

	/* Free TX buffers */
	for (i = 0; i < TX_DESC_COUNT; i++)
		genet_free_txbuf(sc, i);

	ifp->if_flags &= ~IFF_RUNNING;
}

static void
genet_stop(struct ifnet *ifp, int disable)
{
	struct genet_softc * const sc = ifp->if_softc;

	GENET_LOCK(sc);
	genet_stop_locked(sc, disable);
	GENET_UNLOCK(sc);
}

static void
genet_rxintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error, index, len, n;
	struct mbuf *m, *m0;
	uint32_t status, pidx, total;
	int pkts = 0;

	pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
	total = (pidx - sc->sc_rx.cidx) & 0xffff;
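	/*
	 * pidx and cidx are free-running 16-bit counters, so their
	 * difference modulo 2^16 is the number of completed
	 * descriptors; the ring slot is recovered below with
	 * cidx % RX_DESC_COUNT.
	 */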

	DPRINTF("RX pidx=%08x total=%d\n", pidx, total);

	index = sc->sc_rx.cidx % RX_DESC_COUNT;
	for (n = 0; n < total; n++) {
		status = RD4(sc, GENET_RX_DESC_STATUS(index));

		if (status & GENET_RX_DESC_STATUS_ALL_ERRS) {
			if (status & GENET_RX_DESC_STATUS_OVRUN_ERR)
				device_printf(sc->sc_dev, "overrun\n");
			if (status & GENET_RX_DESC_STATUS_CRC_ERR)
				device_printf(sc->sc_dev, "CRC error\n");
			if (status & GENET_RX_DESC_STATUS_RX_ERR)
				device_printf(sc->sc_dev, "receive error\n");
			if (status & GENET_RX_DESC_STATUS_FRAME_ERR)
				device_printf(sc->sc_dev, "frame error\n");
			if (status & GENET_RX_DESC_STATUS_LEN_ERR)
				device_printf(sc->sc_dev, "length error\n");
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		if (status & GENET_RX_DESC_STATUS_OWN)
			device_printf(sc->sc_dev, "OWN %d of %d\n", n, total);

		len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);
		if (len < ETHER_ALIGN) {
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		m = sc->sc_rx.buf_map[index].mbuf;

		if ((m0 = genet_alloc_mbufcl(sc)) == NULL) {
			if_statinc(ifp, if_ierrors);
			goto next;
		}
		MCLAIM(m0, &sc->sc_ec.ec_rx_mowner);

		/* unload map before it gets loaded in setup_rxbuf */
		if (sc->sc_rx.buf_map[index].map->dm_mapsize > 0) {
			bus_dmamap_sync(sc->sc_rx.buf_tag,
			    sc->sc_rx.buf_map[index].map,
			    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
		}
		bus_dmamap_unload(sc->sc_rx.buf_tag,
		    sc->sc_rx.buf_map[index].map);
		sc->sc_rx.buf_map[index].mbuf = NULL;

		error = genet_setup_rxbuf(sc, index, m0);
		if (error != 0) {
			m_freem(m0);
			if_statinc(ifp, if_ierrors);

			/* XXX mbuf is unloaded but load failed */
			m_freem(m);
			device_printf(sc->sc_dev,
			    "cannot load RX mbuf. panic?\n");
			goto next;
		}

		DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
		    n, index, status, len, len - ETHER_ALIGN);

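		/*
		 * The MAC prepends two alignment bytes to each frame
		 * (GENET_RBUF_ALIGN_2B, set in genet_reset()) so that
		 * the IP header lands 4-byte aligned; m_adj() strips
		 * them before the packet is passed up.
		 */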
		m_set_rcvif(m, ifp);
		m->m_len = m->m_pkthdr.len = len;
		m_adj(m, ETHER_ALIGN);

		if_percpuq_enqueue(ifp->if_percpuq, m);
		++pkts;

next:
		index = RX_NEXT(index);

		sc->sc_rx.cidx = (sc->sc_rx.cidx + 1) & 0xffff;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	}

	if (pkts != 0)
		rnd_add_uint32(&sc->sc_rndsource, pkts);
}

static void
genet_txintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int cidx, i, pkts = 0;

	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
	i = sc->sc_tx.cidx % TX_DESC_COUNT;
	while (sc->sc_tx.cidx != cidx) {
		pkts += genet_free_txbuf(sc, i);
		i = TX_NEXT(i);
		sc->sc_tx.cidx = (sc->sc_tx.cidx + 1) & 0xffff;
	}

	if (pkts != 0) {
		if_statadd(ifp, if_opackets, pkts);
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}

	if_schedule_deferred_start(ifp);
}

static void
genet_start_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int nsegs, index, cnt;

	GENET_ASSERT_TXLOCKED(sc);

	if (!sc->sc_txrunning)
		return;

	const int qid = GENET_DMA_DEFAULT_QUEUE;

	index = sc->sc_tx.pidx % TX_DESC_COUNT;
	cnt = 0;

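	/*
	 * Resynchronize the soft queue depth with hardware: the
	 * producer index we last wrote minus our consumer index,
	 * modulo 2^16, is the number of descriptors still in flight.
	 */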
	sc->sc_tx.queued = (RD4(sc, GENET_TX_DMA_PROD_INDEX(qid))
	    - sc->sc_tx.cidx) & 0xffff;

	/* At least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs <= 0) {
			if (nsegs == -2) {
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			}
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		index = TX_SKIP(index, nsegs);
		sc->sc_tx.queued += nsegs;
		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	if (cnt != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
}

static void
genet_start(struct ifnet *ifp)
{
	struct genet_softc *sc = ifp->if_softc;

	GENET_TXLOCK(sc);
	genet_start_locked(sc);
	GENET_TXUNLOCK(sc);
}

int
genet_intr(void *arg)
{
	struct genet_softc *sc = arg;
	uint32_t val;

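	/* Take only the pending interrupts that are not masked, then ack. */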
	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE) {
		GENET_LOCK(sc);
		genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);
		GENET_UNLOCK(sc);
	}

	if (val & GENET_IRQ_TXDMA_DONE) {
		genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
	}

	return 1;
}

static int
genet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct genet_softc *sc = ifp->if_softc;
	int error;

	const int s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	splx(s);

	if (error != ENETRESET)
		return error;

	error = 0;

	if (cmd == SIOCSIFCAP)
		error = if_init(ifp);
	else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
		GENET_LOCK(sc);
		sc->sc_promisc = ifp->if_flags & IFF_PROMISC;
		if (sc->sc_running)
			genet_setup_rxfilter(sc);
		GENET_UNLOCK(sc);
	}
	return error;
}

static void
genet_get_eaddr(struct genet_softc *sc, uint8_t *eaddr)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t maclo, machi, val;
	prop_data_t eaprop;

	eaprop = prop_dictionary_get(prop, "mac-address");
	if (eaprop != NULL) {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(eaddr, prop_data_value(eaprop),
		    ETHER_ADDR_LEN);
		return;
	}

	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = RD4(sc, GENET_UMAC_MAC0);
		machi = RD4(sc, GENET_UMAC_MAC1) & 0xffff;
	}

	if (maclo == 0 && machi == 0) {
		/* Create one */
		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
		machi = cprng_strong32() & 0xffff;
	}

	eaddr[0] = (maclo >> 24) & 0xff;
	eaddr[1] = (maclo >> 16) & 0xff;
	eaddr[2] = (maclo >>  8) & 0xff;
	eaddr[3] = (maclo >>  0) & 0xff;
	eaddr[4] = (machi >>  8) & 0xff;
	eaddr[5] = (machi >>  0) & 0xff;
}

static int
genet_setup_dma(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	int error, i;

	/* Setup TX ring */
	sc->sc_tx.buf_tag = sc->sc_dmat;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
	}

	/* Setup RX ring */
	sc->sc_rx.buf_tag = sc->sc_dmat;
	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = genet_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
	}

	return 0;
}

static void
genet_claim_rxring(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	int i;

	/* Claim mbufs from RX ring */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		m = sc->sc_rx.buf_map[i].mbuf;
		if (m != NULL) {
			MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
		}
	}
}

int
genet_attach(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int maj, min;
	int mii_flags = 0;

	const uint32_t rev = RD4(sc, GENET_SYS_REV_CTRL);
	min = __SHIFTOUT(rev, SYS_REV_MINOR);
	maj = __SHIFTOUT(rev, SYS_REV_MAJOR);
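	/*
	 * Normalize the encoded major version: a value of 0 is treated
	 * as v1, while 5 and 6 are treated as v4 and v5, respectively.
	 */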
	if (maj == 0)
		maj++;
	else if (maj == 5 || maj == 6)
		maj--;

	if (maj != 5) {
		aprint_error(": GENETv%d.%d not supported\n", maj, min);
		return ENXIO;
	}

	switch (sc->sc_phy_mode) {
	case GENET_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_RXID;
		break;
	case GENET_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_RXID | MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII:
	default:
		break;
	}

	aprint_naive("\n");
	aprint_normal(": GENETv%d.%d\n", maj, min);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_stat_ch, genet_tick, sc);

	genet_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	genet_reset(sc);

	/* Setup DMA descriptors */
	if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_start = genet_start;
	ifp->if_ioctl = genet_ioctl;
	ifp->if_init = genet_init;
	ifp->if_stop = genet_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = genet_mii_readreg;
	mii->mii_writereg = genet_mii_writereg;
	mii->mii_statchg = genet_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);

	/* MBUFTRACE */
	genet_claim_rxring(sc, GENET_DMA_DEFAULT_QUEUE);

	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	return 0;
}

#ifdef DDB
void	genet_debug(void);

void
genet_debug(void)
{
	device_t dev = device_find_by_xname("genet0");
	if (dev == NULL)
		return;

	struct genet_softc * const sc = device_private(dev);
	const int qid = GENET_DMA_DEFAULT_QUEUE;

	printf("TX CIDX = %08x (soft)\n", sc->sc_tx.cidx);
	printf("TX CIDX = %08x\n", RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)));
	printf("TX PIDX = %08x (soft)\n", sc->sc_tx.pidx);
	printf("TX PIDX = %08x\n", RD4(sc, GENET_TX_DMA_PROD_INDEX(qid)));

	printf("RX CIDX = %08x (soft)\n", sc->sc_rx.cidx);
	printf("RX CIDX = %08x\n", RD4(sc, GENET_RX_DMA_CONS_INDEX(qid)));
	printf("RX PIDX = %08x (soft)\n", sc->sc_rx.pidx);
	printf("RX PIDX = %08x\n", RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)));
}
#endif