xref: /netbsd-src/sys/dev/ic/bcmgenet.c (revision 8ecbf5f02b752fcb7debe1a8fab1dc82602bc760)
1 /* $NetBSD: bcmgenet.c,v 1.7 2020/06/27 13:34:20 jmcneill Exp $ */
2 
3 /*-
4  * Copyright (c) 2020 Jared McNeill <jmcneill@invisible.ca>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * Broadcom GENETv5
31  */
32 
33 #include "opt_net_mpsafe.h"
34 #include "opt_ddb.h"
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: bcmgenet.c,v 1.7 2020/06/27 13:34:20 jmcneill Exp $");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/device.h>
42 #include <sys/intr.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/mutex.h>
46 #include <sys/callout.h>
47 #include <sys/cprng.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_ether.h>
52 #include <net/if_media.h>
53 #include <net/bpf.h>
54 
55 #include <dev/mii/miivar.h>
56 
57 #include <dev/ic/bcmgenetreg.h>
58 #include <dev/ic/bcmgenetvar.h>
59 
60 CTASSERT(MCLBYTES == 2048);
61 
#ifdef GENET_DEBUG
/*
 * Debug printf. The previous definition used `printf(##__VA_ARGS__)';
 * the `##' paste operator is only valid between a preceding comma and
 * __VA_ARGS__ (GNU extension), so the old form failed to preprocess
 * whenever GENET_DEBUG was defined. Plain __VA_ARGS__ is correct C99.
 */
#define	DPRINTF(...)	printf(__VA_ARGS__)
#else
#define	DPRINTF(...)	((void)0)
#endif
67 
68 #ifdef NET_MPSAFE
69 #define	GENET_MPSAFE		1
70 #define	CALLOUT_FLAGS		CALLOUT_MPSAFE
71 #else
72 #define	CALLOUT_FLAGS		0
73 #endif
74 
75 #define	TX_SKIP(n, o)		(((n) + (o)) & (GENET_DMA_DESC_COUNT - 1))
76 #define	TX_NEXT(n)		TX_SKIP(n, 1)
77 #define	RX_NEXT(n)		(((n) + 1) & (GENET_DMA_DESC_COUNT - 1))
78 
79 #define	TX_MAX_SEGS		128
80 #define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
81 #define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT
82 #define	MII_BUSY_RETRY		1000
83 #define	GENET_MAX_MDF_FILTER	17
84 
85 #define	GENET_LOCK(sc)		mutex_enter(&(sc)->sc_lock)
86 #define	GENET_UNLOCK(sc)	mutex_exit(&(sc)->sc_lock)
87 #define	GENET_ASSERT_LOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_lock))
88 
89 #define	RD4(sc, reg)			\
90 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
91 #define	WR4(sc, reg, val)		\
92 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
93 
94 static int
95 genet_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
96 {
97 	struct genet_softc *sc = device_private(dev);
98 	int retry;
99 
100 	WR4(sc, GENET_MDIO_CMD,
101 	    GENET_MDIO_READ | GENET_MDIO_START_BUSY |
102 	    __SHIFTIN(phy, GENET_MDIO_PMD) |
103 	    __SHIFTIN(reg, GENET_MDIO_REG));
104 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
105 		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0) {
106 			*val = RD4(sc, GENET_MDIO_CMD) & 0xffff;
107 			break;
108 		}
109 		delay(10);
110 	}
111 
112 
113 	if (retry == 0) {
114 		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
115 		    phy, reg);
116 		return ETIMEDOUT;
117 	}
118 
119 	return 0;
120 }
121 
122 static int
123 genet_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
124 {
125 	struct genet_softc *sc = device_private(dev);
126 	int retry;
127 
128 	WR4(sc, GENET_MDIO_CMD,
129 	    val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
130 	    __SHIFTIN(phy, GENET_MDIO_PMD) |
131 	    __SHIFTIN(reg, GENET_MDIO_REG));
132 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
133 		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
134 			break;
135 		delay(10);
136 	}
137 
138 	if (retry == 0) {
139 		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
140 		    phy, reg);
141 		return ETIMEDOUT;
142 	}
143 
144 	return 0;
145 }
146 
/*
 * Program the MAC for the currently negotiated link: pick the UMAC
 * speed from the active media and refresh the RGMII out-of-band
 * control bits. Called from the MII statchg hook.
 */
static void
genet_update_link(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	u_int speed;

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		speed = GENET_UMAC_CMD_SPEED_1000;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		speed = GENET_UMAC_CMD_SPEED_100;
	else
		speed = GENET_UMAC_CMD_SPEED_10;

	/* Take the port out of OOB mode and enable RGMII operation. */
	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	/*
	 * NOTE(review): this assumes ID_MODE_DISABLE set means "no
	 * internal clock delay" (plain RGMII, delays handled by the
	 * PHY/board) -- confirm against the GENET documentation.
	 */
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	/* Read-modify-write the speed field only. */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
	WR4(sc, GENET_UMAC_CMD, val);
}
177 
178 static void
179 genet_mii_statchg(struct ifnet *ifp)
180 {
181 	struct genet_softc * const sc = ifp->if_softc;
182 
183 	genet_update_link(sc);
184 }
185 
/*
 * Program TX descriptor `index' with buffer address `paddr', length
 * `len' and status `flags'. Descriptors live in device register space
 * on this hardware, not in host memory.
 */
static void
genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len)
{
	uint32_t status;

	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);
	/* Ring occupancy accounting: callers rely on this side effect. */
	++sc->sc_tx.queued;

	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
	WR4(sc, GENET_TX_DESC_STATUS(index), status);
}
199 
/*
 * Map mbuf chain `m' for transmit starting at descriptor `index' and
 * program one descriptor per DMA segment (SOP/CRC on the first, EOP on
 * the last).
 *
 * Returns the number of descriptors consumed, 0 if the packet could
 * not be mapped (dropped or transient failure), or -1 if the ring has
 * no room for all segments (caller should stop and retry later).
 *
 * NOTE(review): in the EFBIG path the mbuf is freed here while the
 * caller (genet_start_locked) has only done IFQ_POLL, so the freed
 * mbuf is still at the head of if_snd -- looks like a use-after-free
 * hazard on the next start pass; verify and fix together with the
 * start path.
 */
static int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		m_freem(m);
		return 0;
	}
	if (error != 0)
		return 0;

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	/* Not enough free descriptors for this packet; back out. */
	if (sc->sc_tx.queued >= GENET_DMA_DESC_COUNT - nsegs) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		return -1;
	}

	/* SOP and CRC apply to the first descriptor only (cleared below). */
	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

	for (cur = index, i = 0; i < nsegs; i++) {
		/* Only the first slot records the mbuf, for later freeing. */
		sc->sc_tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		if (i == 0) {
			flags &= ~GENET_TX_DESC_STATUS_SOP;
			flags &= ~GENET_TX_DESC_STATUS_CRC;
		}
		cur = TX_NEXT(cur);
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return nsegs;
}
251 
252 static void
253 genet_setup_rxdesc(struct genet_softc *sc, int index,
254     bus_addr_t paddr, bus_size_t len)
255 {
256 	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
257 	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
258 }
259 
/*
 * Map RX mbuf `m' into slot `index', sync it for device writes and
 * program the descriptor to point at it. Returns 0 or a bus_dma errno.
 * The caller must ensure the slot's DMA map is not still loaded with a
 * previous buffer.
 */
static int
genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Record ownership so rxintr can hand the mbuf up later. */
	sc->sc_rx.buf_map[index].mbuf = m;
	genet_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);

	return 0;
}
281 
282 static struct mbuf *
283 genet_alloc_mbufcl(struct genet_softc *sc)
284 {
285 	struct mbuf *m;
286 
287 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
288 	if (m != NULL)
289 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
290 
291 	return m;
292 }
293 
294 static void
295 genet_enable_intr(struct genet_softc *sc)
296 {
297 	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
298 	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
299 }
300 
301 static void
302 genet_disable_intr(struct genet_softc *sc)
303 {
304 	/* Disable interrupts */
305 	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
306 	WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
307 }
308 
/*
 * Periodic (1 Hz) callout: drive the MII autonegotiation state machine
 * and reschedule. Stopped by genet_stop_locked(). In the non-MPSAFE
 * build the whole body additionally runs at splnet.
 */
static void
genet_tick(void *softc)
{
	struct genet_softc *sc = softc;
	struct mii_data *mii = &sc->sc_mii;
#ifndef GENET_MPSAFE
	int s = splnet();
#endif

	GENET_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->sc_stat_ch, hz);
	GENET_UNLOCK(sc);

#ifndef GENET_MPSAFE
	splx(s);
#endif
}
327 
328 static void
329 genet_setup_rxfilter_mdf(struct genet_softc *sc, u_int n, const uint8_t *ea)
330 {
331 	uint32_t addr0 = (ea[0] << 8) | ea[1];
332 	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];
333 
334 	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
335 	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
336 }
337 
/*
 * Rebuild the hardware receive filter from interface state. If the
 * interface is promiscuous, or there are more groups than MDF slots
 * (ALLMULTI), fall back to UMAC promiscuous mode; otherwise program
 * one MDF slot each for broadcast, our own address and every multicast
 * group, and enable exactly those slots.
 */
static void
genet_setup_rxfilter(struct genet_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GENET_ASSERT_LOCKED(sc);

	ETHER_LOCK(ec);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	for (n = 2; enm != NULL; n++)
		ETHER_NEXT_MULTI(step, enm);

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		/* Slot 0: broadcast, slot 1: our address, 2..n-1: groups. */
		genet_setup_rxfilter_mdf(sc, 0, ifp->if_broadcastaddr);
		genet_setup_rxfilter_mdf(sc, 1, CLLADDR(ifp->if_sadl));
		ETHER_FIRST_MULTI(step, ec, enm);
		for (n = 2; enm != NULL; n++) {
			genet_setup_rxfilter_mdf(sc, n, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}
		/*
		 * Enable the n programmed slots: one bit per slot,
		 * counted down from bit GENET_MAX_MDF_FILTER - 1.
		 */
		mdf_ctrl = __BITS(GENET_MAX_MDF_FILTER - 1,
				  GENET_MAX_MDF_FILTER - n);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);

	ETHER_UNLOCK(ec);
}
389 
/*
 * Soft-reset the MAC core: pulse the RBUF flush reset, software-reset
 * the UMAC, clear the MIB counters and program basic RBUF parameters.
 * Always returns 0.
 */
static int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

	/* Pulse the receive-buffer flush reset bit. */
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	/* Software-reset the UMAC (local loopback held during reset). */
	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	/* Reset, then release, the MIB statistics counters. */
	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	/* 1536 = max standard frame (1518) + VLAN tag + slack. */
	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	/* 2-byte RX alignment keeps the IP header 4-byte aligned. */
	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}
427 
/*
 * Initialize and enable the TX and RX DMA rings for queue `qid'.
 * Ring start/end addresses are word offsets into the on-chip
 * descriptor space (GENET_DMA_DESC_SIZE bytes per descriptor).
 *
 * NOTE(review): the RBUF_EN bits below are set for
 * GENET_DMA_DEFAULT_QUEUE rather than `qid'; equivalent today since
 * only the default queue is used -- revisit if more queues are added.
 */
static void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	/* End address in 32-bit words, inclusive. */
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	/* Interrupt after every completed buffer. */
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.cidx = sc->sc_rx.pidx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	/* Flow-control thresholds for XON/XOFF generation. */
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
497 
/*
 * Bring the interface up (core of if_init): select the port mode,
 * program the MAC address, RX filter and DMA rings, enable the
 * transmitter/receiver and interrupts, then kick off media selection
 * and the periodic tick. Must be called with the driver lock held.
 * Returns 0 (including when already running).
 */
static int
genet_init_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);

	GENET_ASSERT_LOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	/* All RGMII variants use the external GPHY port mode. */
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_TXID)
		WR4(sc, GENET_SYS_PORT_CTRL,
		    GENET_SYS_PORT_MODE_EXT_GPHY);
	else
		WR4(sc, GENET_SYS_PORT_CTRL, 0);

	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);

	/* Setup RX filter */
	genet_setup_rxfilter(sc);

	/* Setup TX/RX rings */
	genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	genet_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}
550 
551 static int
552 genet_init(struct ifnet *ifp)
553 {
554 	struct genet_softc *sc = ifp->if_softc;
555 	int error;
556 
557 	GENET_LOCK(sc);
558 	error = genet_init_locked(sc);
559 	GENET_UNLOCK(sc);
560 
561 	return error;
562 }
563 
/*
 * Bring the interface down (core of if_stop): stop the tick callout
 * and MII, disable the receiver, halt both DMA engines, flush the TX
 * FIFO, disable the transmitter and mask interrupts. `disable' is
 * part of the if_stop contract but is unused here. Must be called
 * with the driver lock held.
 */
static void
genet_stop_locked(struct genet_softc *sc, int disable)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;

	GENET_ASSERT_LOCKED(sc);

	callout_stop(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	genet_disable_intr(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
606 
607 static void
608 genet_stop(struct ifnet *ifp, int disable)
609 {
610 	struct genet_softc * const sc = ifp->if_softc;
611 
612 	GENET_LOCK(sc);
613 	genet_stop_locked(sc, disable);
614 	GENET_UNLOCK(sc);
615 }
616 
617 static void
618 genet_rxintr(struct genet_softc *sc, int qid)
619 {
620 	struct ifnet *ifp = &sc->sc_ec.ec_if;
621 	int error, index, len, n;
622 	struct mbuf *m, *m0;
623 	uint32_t status, pidx, total;
624 
625 	pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
626 	total = (pidx - sc->sc_rx.cidx) & 0xffff;
627 
628 	DPRINTF("RX pidx=%08x total=%d\n", pidx, total);
629 
630 	index = sc->sc_rx.cidx & (RX_DESC_COUNT - 1);
631 	for (n = 0; n < total; n++) {
632 		status = RD4(sc, GENET_RX_DESC_STATUS(index));
633 		len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);
634 
635 		m = sc->sc_rx.buf_map[index].mbuf;
636 
637 		if ((m0 = genet_alloc_mbufcl(sc)) == NULL) {
638 			if_statinc(ifp, if_ierrors);
639 			goto next;
640 		}
641 		error = genet_setup_rxbuf(sc, index, m0);
642 		if (error != 0) {
643 			if_statinc(ifp, if_ierrors);
644 			goto next;
645 		}
646 
647 		bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
648 		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
649 		    BUS_DMASYNC_POSTREAD);
650 		bus_dmamap_unload(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map);
651 
652 		DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
653 		    n, index, status, len, len - ETHER_ALIGN);
654 
655 		if (len > ETHER_ALIGN) {
656 			m_adj(m, ETHER_ALIGN);
657 
658 			m_set_rcvif(m, ifp);
659 			m->m_len = m->m_pkthdr.len = len - ETHER_ALIGN;
660 			m->m_nextpkt = NULL;
661 
662 			if_percpuq_enqueue(ifp->if_percpuq, m);
663 		}
664 
665 next:
666 		index = RX_NEXT(index);
667 
668 		sc->sc_rx.cidx = (sc->sc_rx.cidx + 1) & 0xffff;
669 		WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
670 	}
671 }
672 
673 static void
674 genet_txintr(struct genet_softc *sc, int qid)
675 {
676 	struct ifnet *ifp = &sc->sc_ec.ec_if;
677 	struct genet_bufmap *bmap;
678 	uint32_t cidx, total;
679 	int i;
680 
681 	GENET_ASSERT_LOCKED(sc);
682 
683 	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
684 	total = (cidx - sc->sc_tx.cidx) & 0xffff;
685 
686 	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0 && total > 0; i = TX_NEXT(i), total--) {
687 		/* XXX check for errors */
688 
689 		bmap = &sc->sc_tx.buf_map[i];
690 		if (bmap->mbuf != NULL) {
691 			bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
692 			    0, bmap->map->dm_mapsize,
693 			    BUS_DMASYNC_POSTWRITE);
694 			bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
695 			m_freem(bmap->mbuf);
696 			bmap->mbuf = NULL;
697 		}
698 
699 		--sc->sc_tx.queued;
700 		ifp->if_flags &= ~IFF_OACTIVE;
701 		if_statinc(ifp, if_opackets);
702 	}
703 
704 	sc->sc_tx.next = i;
705 	sc->sc_tx.cidx = cidx;
706 }
707 
/*
 * Core transmit start (if_start): drain if_snd into the default TX
 * ring and, if anything was queued, ring the producer-index doorbell
 * once at the end.
 *
 * NOTE(review): genet_setup_txbuf() frees the mbuf in its EFBIG path,
 * but at that point this loop has only done IFQ_POLL, so the freed
 * mbuf remains at the head of if_snd -- the next start pass would
 * touch freed memory. Verify and restructure (dequeue-before-map or
 * requeue-on-failure) together with genet_setup_txbuf().
 */
static void
genet_start_locked(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int nsegs, index, cnt;

	GENET_ASSERT_LOCKED(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	const int qid = GENET_DMA_DEFAULT_QUEUE;

	index = sc->sc_tx.pidx & (TX_DESC_COUNT - 1);
	cnt = 0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs <= 0) {
			/* -1: ring full, back off until txintr frees slots. */
			if (nsegs == -1)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		index = TX_SKIP(index, nsegs);

		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	/* Single doorbell write covers every packet queued above. */
	if (cnt != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
}
748 
749 static void
750 genet_start(struct ifnet *ifp)
751 {
752 	struct genet_softc *sc = ifp->if_softc;
753 
754 	GENET_LOCK(sc);
755 	genet_start_locked(sc);
756 	GENET_UNLOCK(sc);
757 }
758 
/*
 * INTRL2 interrupt handler: acknowledge the pending, unmasked sources
 * and run RX/TX completion for the default queue. Always claims the
 * interrupt (returns 1).
 */
int
genet_intr(void *arg)
{
	struct genet_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t val;

	GENET_LOCK(sc);

	/* Pending sources minus those currently masked; then ack them. */
	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);

	if (val & GENET_IRQ_TXDMA_DONE) {
		genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
		/* Freed descriptors may let transmit make progress. */
		if_schedule_deferred_start(ifp);
	}

	GENET_UNLOCK(sc);

	return 1;
}
784 
/*
 * ioctl handler. Everything funnels through ether_ioctl(); when it
 * returns ENETRESET, SIOCSIFCAP triggers a reinit and multicast
 * add/delete refreshes the RX filter (only if running). The spl
 * handling differs by build: without NET_MPSAFE the entire handler
 * runs at splnet, with it only the ether_ioctl() call is wrapped.
 */
static int
genet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct genet_softc *sc = ifp->if_softc;
	int error, s;

#ifndef GENET_MPSAFE
	s = splnet();
#endif

	switch (cmd) {
	default:
#ifdef GENET_MPSAFE
		s = splnet();
#endif
		error = ether_ioctl(ifp, cmd, data);
#ifdef GENET_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;	/* nothing further to do */
		else if ((ifp->if_flags & IFF_RUNNING) != 0) {
			GENET_LOCK(sc);
			genet_setup_rxfilter(sc);
			GENET_UNLOCK(sc);
		}
		break;
	}

#ifndef GENET_MPSAFE
	splx(s);
#endif

	return error;
}
827 
/*
 * Determine the Ethernet address, in order of preference: a
 * "mac-address" device property, whatever the firmware left programmed
 * in the UMAC (only trusted if the core is out of reset), and finally
 * a randomly generated locally-administered address.
 *
 * NOTE(review): the htobe32/htobe16 byte extraction below matches the
 * UMAC_MAC0/MAC1 register layout on little-endian hosts (the only
 * users so far); confirm before relying on it for big-endian.
 */
static void
genet_get_eaddr(struct genet_softc *sc, uint8_t *eaddr)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t maclo, machi, val;
	prop_data_t eaprop;

	eaprop = prop_dictionary_get(prop, "mac-address");
	if (eaprop != NULL) {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(eaddr, prop_data_value(eaprop),
		    ETHER_ADDR_LEN);
		return;
	}

	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		/* Create one */
		/* 0xf2 = locally administered, unicast first octet. */
		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
		machi = cprng_strong32() & 0xffff;
	}

	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}
865 
/*
 * Create the per-descriptor DMA maps for the TX ring, plus maps and
 * initial mbuf clusters for the RX ring. `qid' is currently unused;
 * both rings use the shared descriptor counts. Returns 0 or an errno.
 *
 * NOTE(review): on failure, maps/mbufs created so far are not torn
 * down -- attach fails and the resources leak. Tolerable for an
 * attach-time path, but worth confirming that is intended.
 */
static int
genet_setup_dma(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	int error, i;

	/* Setup TX ring */
	sc->sc_tx.buf_tag = sc->sc_dmat;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
	}

	/* Setup RX ring */
	sc->sc_rx.buf_tag = sc->sc_dmat;
	for (i = 0; i < RX_DESC_COUNT; i++) {
		/* RX buffers are single-segment clusters. */
		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = genet_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
	}

	return 0;
}
909 
/*
 * Common attach entry point, called by bus front-ends after mapping
 * registers: identify the GENET revision (only v5 is supported),
 * initialize the lock and tick callout, obtain the MAC address, reset
 * the core, set up DMA, and attach the network interface and MII/PHY.
 * Returns 0 on success or an errno on failure.
 */
int
genet_attach(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int maj, min;
	int mii_flags = 0;

	/* Decode the major revision: raw 0 means v1, raw 5/6 mean v4/v5. */
	const uint32_t rev = RD4(sc, GENET_SYS_REV_CTRL);
	min = __SHIFTOUT(rev, SYS_REV_MINOR);
	maj = __SHIFTOUT(rev, SYS_REV_MAJOR);
	if (maj == 0)
		maj++;
	else if (maj == 5 || maj == 6)
		maj--;

	if (maj != 5) {
		aprint_error(": GENETv%d.%d not supported\n", maj, min);
		return ENXIO;
	}

	/* Tell the PHY layer which side supplies the RGMII delays. */
	switch (sc->sc_phy_mode) {
	case GENET_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_RXID;
		break;
	case GENET_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_RXID | MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII:
	default:
		break;
	}

	aprint_naive("\n");
	aprint_normal(": GENETv%d.%d\n", maj, min);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_stat_ch, genet_tick, sc);

	genet_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	genet_reset(sc);

	/* Setup DMA descriptors */
	if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef GENET_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_start = genet_start;
	ifp->if_ioctl = genet_ioctl;
	ifp->if_init = genet_init;
	ifp->if_stop = genet_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = genet_mii_readreg;
	mii->mii_writereg = genet_mii_writereg;
	mii->mii_statchg = genet_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);

	return 0;
}
1010 
#ifdef DDB
void	genet_debug(void);

/*
 * Dump the soft-state vs. hardware TX/RX ring indices for genet0.
 * Intended to be called from the ddb kernel debugger.
 */
void
genet_debug(void)
{
	device_t dev = device_find_by_xname("genet0");
	if (dev == NULL)
		return;

	struct genet_softc * const sc = device_private(dev);
	const int qid = GENET_DMA_DEFAULT_QUEUE;

	printf("TX CIDX = %08x (soft)\n", sc->sc_tx.cidx);
	printf("TX CIDX = %08x\n", RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)));
	printf("TX PIDX = %08x (soft)\n", sc->sc_tx.pidx);
	printf("TX PIDX = %08x\n", RD4(sc, GENET_TX_DMA_PROD_INDEX(qid)));

	printf("RX CIDX = %08x (soft)\n", sc->sc_rx.cidx);
	printf("RX CIDX = %08x\n", RD4(sc, GENET_RX_DMA_CONS_INDEX(qid)));
	printf("RX PIDX = %08x (soft)\n", sc->sc_rx.pidx);
	printf("RX PIDX = %08x\n", RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)));
}
#endif
1035