xref: /openbsd-src/sys/dev/ic/bcmgenet.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /* $OpenBSD: bcmgenet.c,v 1.4 2020/12/12 11:48:52 jan Exp $ */
2 /* $NetBSD: bcmgenet.c,v 1.3 2020/02/27 17:30:07 jmcneill Exp $ */
3 
4 /*-
5  * Copyright (c) 2020 Jared McNeill <jmcneill@invisible.ca>
6  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Broadcom GENETv5
33  */
34 
35 #include <sys/param.h>
36 #include <sys/device.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/mbuf.h>
40 #include <sys/queue.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/timeout.h>
44 
45 #include <net/if.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/bpf.h>
49 
50 #include <netinet/in.h>
51 #include <netinet/if_ether.h>
52 
53 #include <machine/bus.h>
54 #include <machine/intr.h>
55 
56 #include <dev/mii/miivar.h>
57 
58 #include <dev/ic/bcmgenetreg.h>
59 #include <dev/ic/bcmgenetvar.h>
60 
61 CTASSERT(MCLBYTES == 2048);
62 
#ifdef GENET_DEBUG
/*
 * Debug printf.  Note: must expand to printf(__VA_ARGS__); the former
 * printf(##__VA_ARGS__) attempted to paste "(" with the first argument
 * token, which is an invalid preprocessing token and broke the build
 * whenever GENET_DEBUG was defined.
 */
#define	DPRINTF(...)	printf(__VA_ARGS__)
#else
#define	DPRINTF(...)	((void)0)
#endif
68 
69 #define	TX_SKIP(n, o)		(((n) + (o)) & (GENET_DMA_DESC_COUNT - 1))
70 #define	TX_NEXT(n)		TX_SKIP(n, 1)
71 #define	RX_NEXT(n)		(((n) + 1) & (GENET_DMA_DESC_COUNT - 1))
72 
73 #define	TX_MAX_SEGS		128
74 #define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
75 #define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT
76 #define	MII_BUSY_RETRY		1000
77 #define	GENET_MAX_MDF_FILTER	17
78 
79 #define	RD4(sc, reg)			\
80 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
81 #define	WR4(sc, reg, val)		\
82 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
83 
/* Autoconf glue: driver class descriptor for "bse" network interfaces. */
struct cfdriver bse_cd = {
	0, "bse", DV_IFNET
};
87 
88 int
89 genet_media_change(struct ifnet *ifp)
90 {
91 	struct genet_softc *sc = ifp->if_softc;
92 
93 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
94 		mii_mediachg(&sc->sc_mii);
95 
96 	return (0);
97 }
98 
99 void
100 genet_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
101 {
102 	struct genet_softc *sc = ifp->if_softc;
103 
104 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
105 		mii_pollstat(&sc->sc_mii);
106 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
107 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
108 	}
109 }
110 
111 int
112 genet_mii_readreg(struct device *dev, int phy, int reg)
113 {
114 	struct genet_softc *sc = (struct genet_softc *)dev;
115 	int retry;
116 
117 	WR4(sc, GENET_MDIO_CMD,
118 	    GENET_MDIO_READ | GENET_MDIO_START_BUSY |
119 	    __SHIFTIN(phy, GENET_MDIO_PMD) |
120 	    __SHIFTIN(reg, GENET_MDIO_REG));
121 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
122 		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
123 			return RD4(sc, GENET_MDIO_CMD) & 0xffff;
124 		delay(10);
125 	}
126 
127 	printf("%s: phy read timeout, phy=%d reg=%d\n",
128 	    sc->sc_dev.dv_xname, phy, reg);
129 	return 0;
130 }
131 
132 void
133 genet_mii_writereg(struct device *dev, int phy, int reg, int val)
134 {
135 	struct genet_softc *sc = (struct genet_softc *)dev;
136 	int retry;
137 
138 	WR4(sc, GENET_MDIO_CMD,
139 	    val | GENET_MDIO_WRITE | GENET_MDIO_START_BUSY |
140 	    __SHIFTIN(phy, GENET_MDIO_PMD) |
141 	    __SHIFTIN(reg, GENET_MDIO_REG));
142 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
143 		if ((RD4(sc, GENET_MDIO_CMD) & GENET_MDIO_START_BUSY) == 0)
144 			return;
145 		delay(10);
146 	}
147 
148 	printf("%s: phy write timeout, phy=%d reg=%d\n",
149 	    sc->sc_dev.dv_xname, phy, reg);
150 }
151 
152 void
153 genet_update_link(struct genet_softc *sc)
154 {
155 	struct mii_data *mii = &sc->sc_mii;
156 	uint32_t val;
157 	u_int speed;
158 
159 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
160 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
161 		speed = GENET_UMAC_CMD_SPEED_1000;
162 	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
163 		speed = GENET_UMAC_CMD_SPEED_100;
164 	else
165 		speed = GENET_UMAC_CMD_SPEED_10;
166 
167 	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
168 	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
169 	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
170 	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
171 	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII)
172 		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
173 	else
174 		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
175 	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);
176 
177 	val = RD4(sc, GENET_UMAC_CMD);
178 	val &= ~GENET_UMAC_CMD_SPEED;
179 	val |= __SHIFTIN(speed, GENET_UMAC_CMD_SPEED);
180 	WR4(sc, GENET_UMAC_CMD, val);
181 }
182 
/*
 * MII status-change callback: the PHY reported a new link state, so
 * reprogram the MAC side to match.
 */
void
genet_mii_statchg(struct device *self)
{
	genet_update_link((struct genet_softc *)self);
}
190 
191 void
192 genet_setup_txdesc(struct genet_softc *sc, int index, int flags,
193     bus_addr_t paddr, u_int len)
194 {
195 	uint32_t status;
196 
197 	status = flags | __SHIFTIN(len, GENET_TX_DESC_STATUS_BUFLEN);
198 	++sc->sc_tx.queued;
199 
200 	WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
201 	WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
202 	WR4(sc, GENET_TX_DESC_STATUS(index), status);
203 }
204 
/*
 * Load an outgoing mbuf chain into the TX ring starting at "index".
 * Returns the number of descriptors consumed, 0 if the mbuf could not
 * be mapped (caller drops it), or -1 if the ring is too full (caller
 * must back off and retry later).
 */
int
genet_setup_txbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;

	/*
	 * XXX Hardware doesn't seem to like small fragments.  For now
	 * just look at the first fragment and defrag if it is smaller
	 * than the minimum Ethernet packet size.
	 */
	if (m->m_len < ETHER_MIN_LEN - ETHER_CRC_LEN) {
		if (m_defrag(m, M_DONTWAIT))
			return 0;
	}

	error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		if (m_defrag(m, M_DONTWAIT))
			return 0;
		error = bus_dmamap_load_mbuf(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return 0;

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	/* Make sure enough free descriptors remain for every segment. */
	if (sc->sc_tx.queued >= GENET_DMA_DESC_COUNT - nsegs) {
		bus_dmamap_unload(sc->sc_tx.buf_tag,
		    sc->sc_tx.buf_map[index].map);
		return -1;
	}

	/* First descriptor carries SOP and requests hardware CRC. */
	flags = GENET_TX_DESC_STATUS_SOP |
		GENET_TX_DESC_STATUS_CRC |
		GENET_TX_DESC_STATUS_QTAG;

	for (cur = index, i = 0; i < nsegs; i++) {
		/* Only the first slot keeps the mbuf pointer for freeing. */
		sc->sc_tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= GENET_TX_DESC_STATUS_EOP;

		genet_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);

		/* SOP/CRC apply to the first descriptor only. */
		if (i == 0) {
			flags &= ~GENET_TX_DESC_STATUS_SOP;
			flags &= ~GENET_TX_DESC_STATUS_CRC;
		}
		cur = TX_NEXT(cur);
	}

	bus_dmamap_sync(sc->sc_tx.buf_tag, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return nsegs;
}
267 
/*
 * Program the DMA buffer address of one RX descriptor.  The buffer
 * length comes from the ring configuration, so "len" is unused here.
 */
void
genet_setup_rxdesc(struct genet_softc *sc, int index,
    bus_addr_t paddr, bus_size_t len)
{
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)paddr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(paddr >> 32));
}
275 
/*
 * Map an mbuf cluster for RX DMA at ring slot "index" and hand its
 * address to the hardware.  Returns 0 on success or the bus_dma error
 * (caller keeps ownership of the mbuf on failure).
 */
int
genet_setup_rxbuf(struct genet_softc *sc, int index, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_rx.buf_tag,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;
	genet_setup_rxdesc(sc, index,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr,
	    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_len);

	return 0;
}
297 
298 struct mbuf *
299 genet_alloc_mbufcl(struct genet_softc *sc)
300 {
301 	struct mbuf *m;
302 
303 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
304 	if (m != NULL)
305 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
306 
307 	return m;
308 }
309 
/*
 * Replenish the RX ring with fresh mbuf clusters.  pidx/cidx are
 * 16-bit free-running counters shared with the hardware; the consumer
 * index is advanced for every buffer posted.  If the ring ends up
 * completely empty, arm a timeout to retry allocation later.
 */
void
genet_fill_rx_ring(struct genet_softc *sc, int qid)
{
	struct mbuf *m;
	uint32_t cidx, index, total;
	u_int slots;
	int error;

	cidx = sc->sc_rx.cidx;
	/* Descriptors the hardware has handed back since the last fill. */
	total = (sc->sc_rx.pidx - cidx) & 0xffff;
	KASSERT(total <= RX_DESC_COUNT);

	index = sc->sc_rx.cidx & (RX_DESC_COUNT - 1);
	for (slots = if_rxr_get(&sc->sc_rx_ring, total);
	     slots > 0; slots--) {
		if ((m = genet_alloc_mbufcl(sc)) == NULL) {
			printf("%s: cannot allocate RX mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		error = genet_setup_rxbuf(sc, index, m);
		if (error != 0) {
			printf("%s: cannot create RX buffer\n",
			    sc->sc_dev.dv_xname);
			m_freem(m);
			break;
		}

		cidx = (cidx + 1) & 0xffff;
		index = RX_NEXT(index);
	}
	/* Return any slots we failed to fill to the rxr accounting. */
	if_rxr_put(&sc->sc_rx_ring, slots);

	if (sc->sc_rx.cidx != cidx) {
		sc->sc_rx.cidx = cidx;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	}

	/* No buffers at all posted: retry from genet_rxtick(). */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}
351 
352 void
353 genet_rxtick(void *arg)
354 {
355 	genet_fill_rx_ring(arg, GENET_DMA_DEFAULT_QUEUE);
356 }
357 
358 void
359 genet_enable_intr(struct genet_softc *sc)
360 {
361 	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
362 	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
363 }
364 
365 void
366 genet_disable_intr(struct genet_softc *sc)
367 {
368 	/* Disable interrupts */
369 	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
370 	WR4(sc, GENET_INTRL2_CPU_CLEAR, 0xffffffff);
371 }
372 
373 void
374 genet_tick(void *softc)
375 {
376 	struct genet_softc *sc = softc;
377 	struct mii_data *mii = &sc->sc_mii;
378 	int s = splnet();
379 
380 	mii_tick(mii);
381 	timeout_add_sec(&sc->sc_stat_ch, 1);
382 
383 	splx(s);
384 }
385 
386 void
387 genet_setup_rxfilter_mdf(struct genet_softc *sc, u_int n, const uint8_t *ea)
388 {
389 	uint32_t addr0 = (ea[0] << 8) | ea[1];
390 	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];
391 
392 	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
393 	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
394 }
395 
/*
 * Program the RX address filter.  The hardware has GENET_MAX_MDF_FILTER
 * exact-match slots; if those do not suffice (or a multicast range is
 * configured) fall back to promiscuous mode and set IFF_ALLMULTI.
 */
void
genet_setup_rxfilter(struct genet_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	for (n = 2; enm != NULL; n++)
		ETHER_NEXT_MULTI(step, enm);

	if (n > GENET_MAX_MDF_FILTER || ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		/* Slot 0: broadcast, slot 1: our address, rest: multicast. */
		genet_setup_rxfilter_mdf(sc, 0, etherbroadcastaddr);
		genet_setup_rxfilter_mdf(sc, 1, LLADDR(ifp->if_sadl));
		ETHER_FIRST_MULTI(step, ac, enm);
		for (n = 2; enm != NULL; n++) {
			genet_setup_rxfilter_mdf(sc, n, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}
		/* Enable exactly the n slots programmed above. */
		mdf_ctrl = __BITS(GENET_MAX_MDF_FILTER - 1,
				  GENET_MAX_MDF_FILTER - n);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}
441 
/*
 * Soft-reset the MAC: flush the RX buffer logic, software-reset the
 * UMAC, clear the MIB counters and restore basic frame parameters.
 * Always returns 0.
 */
int
genet_reset(struct genet_softc *sc)
{
	uint32_t val;

	/* Pulse the RBUF flush bit, then clear the register entirely. */
	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	delay(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	delay(10);

	/* Software-reset the UMAC with local loopback enabled. */
	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	delay(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	/* Clear the hardware statistics counters. */
	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	/* Accept frames up to 1536 bytes. */
	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	/*
	 * 2-byte receive alignment; the padding is stripped again with
	 * m_adj(m, ETHER_ALIGN) in genet_rxintr().
	 */
	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	return 0;
}
479 
/*
 * Initialize and enable the TX and RX DMA rings for queue "qid",
 * reset the software ring indices, post RX buffers and turn on both
 * DMA engines.
 */
void
genet_init_rings(struct genet_softc *sc, int qid)
{
	uint32_t val;

	/* TX ring */

	sc->sc_tx.next = 0;
	sc->sc_tx.queued = 0;
	sc->sc_tx.cidx = sc->sc_tx.pidx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), sc->sc_tx.cidx);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(TX_DESC_COUNT, GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	/* End address is in 32-bit words, hence the divide by 4. */
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* RX ring */

	sc->sc_rx.next = 0;
	sc->sc_rx.cidx = 0;
	sc->sc_rx.pidx = RX_DESC_COUNT;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), sc->sc_rx.pidx);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), sc->sc_rx.cidx);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    __SHIFTIN(RX_DESC_COUNT, GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT) |
	    __SHIFTIN(MCLBYTES, GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    __SHIFTIN(5, GENET_RX_DMA_XON_XOFF_THRES_LO) |
	    __SHIFTIN(RX_DESC_COUNT >> 4, GENET_RX_DMA_XON_XOFF_THRES_HI));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Post RX buffers (rxr low watermark 2, high RX_DESC_COUNT). */
	if_rxr_init(&sc->sc_rx_ring, 2, RX_DESC_COUNT);
	genet_fill_rx_ring(sc, qid);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
555 
/*
 * Bring the interface up: program the MAC address, RX filter and DMA
 * rings, enable TX/RX and interrupts, and start the MII tick timer.
 * No-op if already running.  Always returns 0.
 */
int
genet_init(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t val;
	uint8_t *enaddr = LLADDR(ifp->if_sadl);

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	/* Select the external GPHY port for all RGMII variants. */
	if (sc->sc_phy_mode == GENET_PHY_MODE_RGMII ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == GENET_PHY_MODE_RGMII_TXID)
		WR4(sc, GENET_SYS_PORT_CTRL,
		    GENET_SYS_PORT_MODE_EXT_GPHY);

	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);

	/* Setup RX filter */
	genet_setup_rxfilter(sc);

	/* Setup TX/RX rings */
	genet_init_rings(sc, GENET_DMA_DEFAULT_QUEUE);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	genet_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mii_mediachg(mii);
	timeout_add_sec(&sc->sc_stat_ch, 1);

	return 0;
}
604 
/*
 * Bring the interface down: stop the timers, the MAC and both DMA
 * engines, then release every mbuf still held by the TX and RX rings.
 */
void
genet_stop(struct genet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct genet_bufmap *bmap;
	uint32_t val;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	/* Flush data in the TX FIFO */
	WR4(sc, GENET_UMAC_TX_FLUSH, 1);
	delay(10);
	WR4(sc, GENET_UMAC_TX_FLUSH, 0);

	/* Disable transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable interrupts */
	genet_disable_intr(sc);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Wait for any in-flight interrupt handler to finish. */
	intr_barrier(sc->sc_ih);

	/* Clean RX ring. */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		bmap = &sc->sc_rx.buf_map[i];
		if (bmap->mbuf) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map, 0,
			    bmap->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
	}

	/* Clean TX ring. */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map, 0,
			    bmap->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
	}
}
678 
/*
 * RX completion: harvest every descriptor between our last producer
 * snapshot and the hardware's current producer index, pass good
 * packets up the stack and refill the ring.
 */
void
genet_rxintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int index, len, n;
	uint32_t status, pidx, total;

	pidx = RD4(sc, GENET_RX_DMA_PROD_INDEX(qid)) & 0xffff;
	/* 16-bit modular difference between free-running counters. */
	total = (pidx - sc->sc_rx.pidx) & 0xffff;

	DPRINTF("RX pidx=%08x total=%d\n", pidx, total);

	index = sc->sc_rx.next;
	for (n = 0; n < total; n++) {
		status = RD4(sc, GENET_RX_DESC_STATUS(index));
		len = __SHIFTOUT(status, GENET_RX_DESC_STATUS_BUFLEN);

		/* XXX check for errors */

		bus_dmamap_sync(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map,
		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx.buf_tag, sc->sc_rx.buf_map[index].map);

		DPRINTF("RX [#%d] index=%02x status=%08x len=%d adj_len=%d\n",
		    n, index, status, len, len - ETHER_ALIGN);

		m = sc->sc_rx.buf_map[index].mbuf;
		sc->sc_rx.buf_map[index].mbuf = NULL;

		if (len > ETHER_ALIGN) {
			/* Strip the 2-byte pad added by GENET_RBUF_ALIGN_2B. */
			m_adj(m, ETHER_ALIGN);

			m->m_len = m->m_pkthdr.len = len - ETHER_ALIGN;
			m->m_nextpkt = NULL;

			ml_enqueue(&ml, m);
		} else {
			ifp->if_ierrors++;
			m_freem(m);
		}

		if_rxr_put(&sc->sc_rx_ring, 1);

		index = RX_NEXT(index);
	}

	if (sc->sc_rx.pidx != pidx) {
		sc->sc_rx.next = index;
		sc->sc_rx.pidx = pidx;

		/* Back off ring refills if the stack reports livelock. */
		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rx_ring);

		genet_fill_rx_ring(sc, qid);
	}
}
738 
/*
 * TX completion: free mbufs for descriptors the hardware has consumed,
 * as indicated by the advance of the hardware consumer index.
 */
void
genet_txintr(struct genet_softc *sc, int qid)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct genet_bufmap *bmap;
	uint32_t cidx, total;
	int i;

	cidx = RD4(sc, GENET_TX_DMA_CONS_INDEX(qid)) & 0xffff;
	/* 16-bit modular difference between free-running counters. */
	total = (cidx - sc->sc_tx.cidx) & 0xffff;

	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0 && total > 0;
	     i = TX_NEXT(i), total--) {
		/* XXX check for errors */

		/* Only the first slot of each packet holds an mbuf. */
		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx.buf_tag, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		--sc->sc_tx.queued;
	}

	/* Everything out: cancel the transmit watchdog. */
	if (sc->sc_tx.queued == 0)
		ifp->if_timer = 0;

	if (sc->sc_tx.cidx != cidx) {
		sc->sc_tx.next = i;
		sc->sc_tx.cidx = cidx;

		/* Descriptors freed: let the queue run again. */
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}
778 
/*
 * Transmit start routine: drain the send queue into the TX ring until
 * it runs dry or the ring fills, then kick the hardware by writing the
 * new producer index.
 */
void
genet_start(struct ifnet *ifp)
{
	struct genet_softc *sc = ifp->if_softc;
	struct mbuf *m;
	const int qid = GENET_DMA_DEFAULT_QUEUE;
	int nsegs, index, cnt;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;

	index = sc->sc_tx.pidx & (TX_DESC_COUNT - 1);
	cnt = 0;

	for (;;) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		nsegs = genet_setup_txbuf(sc, index, m);
		if (nsegs == -1) {
			/* Ring full: put the packet back and stall. */
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (nsegs == 0) {
			/* Mapping failed: drop the packet. */
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		ifq_deq_commit(&ifp->if_snd, m);
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);

		index = TX_SKIP(index, nsegs);

		/* pidx is a 16-bit free-running counter. */
		sc->sc_tx.pidx = (sc->sc_tx.pidx + nsegs) & 0xffff;
		cnt++;
	}

	if (cnt != 0) {
		/* Hand new descriptors to the hardware; arm watchdog. */
		WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), sc->sc_tx.pidx);
		ifp->if_timer = 5;
	}
}
827 
828 int
829 genet_intr(void *arg)
830 {
831 	struct genet_softc *sc = arg;
832 	struct ifnet *ifp = &sc->sc_ac.ac_if;
833 	uint32_t val;
834 
835 	val = RD4(sc, GENET_INTRL2_CPU_STAT);
836 	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
837 	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);
838 
839 	if (val & GENET_IRQ_RXDMA_DONE)
840 		genet_rxintr(sc, GENET_DMA_DEFAULT_QUEUE);
841 
842 	if (val & GENET_IRQ_TXDMA_DONE) {
843 		genet_txintr(sc, GENET_DMA_DEFAULT_QUEUE);
844 		if (ifq_is_oactive(&ifp->if_snd))
845 			ifq_restart(&ifp->if_snd);
846 	}
847 
848 	return 1;
849 }
850 
/*
 * Interface ioctl handler, run at splnet().  ENETRESET from any
 * subcommand is converted into an RX filter reload while running.
 */
int
genet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct genet_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				genet_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				genet_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		/* Multicast list or flags changed: reprogram filter. */
		if (ifp->if_flags & IFF_RUNNING)
			genet_setup_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}
900 
901 int
902 genet_setup_dma(struct genet_softc *sc, int qid)
903 {
904 	int error, i;
905 
906 	/* Setup TX ring */
907 	sc->sc_tx.buf_tag = sc->sc_dmat;
908 	for (i = 0; i < TX_DESC_COUNT; i++) {
909 		error = bus_dmamap_create(sc->sc_tx.buf_tag, MCLBYTES,
910 		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
911 		    &sc->sc_tx.buf_map[i].map);
912 		if (error != 0) {
913 			printf("%s: cannot create TX buffer map\n",
914 			    sc->sc_dev.dv_xname);
915 			return error;
916 		}
917 	}
918 
919 	/* Setup RX ring */
920 	sc->sc_rx.buf_tag = sc->sc_dmat;
921 	for (i = 0; i < RX_DESC_COUNT; i++) {
922 		error = bus_dmamap_create(sc->sc_rx.buf_tag, MCLBYTES,
923 		    1, MCLBYTES, 0, BUS_DMA_WAITOK,
924 		    &sc->sc_rx.buf_map[i].map);
925 		if (error != 0) {
926 			printf("%s: cannot create RX buffer map\n",
927 			    sc->sc_dev.dv_xname);
928 			return error;
929 		}
930 	}
931 
932 	return 0;
933 }
934 
/*
 * Common attachment code, called by the bus front-end once registers
 * and interrupts are mapped: reset the MAC, create DMA maps, attach
 * the MII layer and register the network interface.
 */
int
genet_attach(struct genet_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int mii_flags = 0;

	/* Tell the PHY driver which side inserts the RGMII delays. */
	switch (sc->sc_phy_mode) {
	case GENET_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_RXID | MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_RXID;
		break;
	case GENET_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_TXID;
		break;
	case GENET_PHY_MODE_RGMII:
	default:
		break;
	}

	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	/* Soft reset EMAC core */
	genet_reset(sc);

	/* Setup DMA descriptors */
	if (genet_setup_dma(sc, GENET_DMA_DEFAULT_QUEUE) != 0) {
		printf("%s: failed to setup DMA descriptors\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	timeout_set(&sc->sc_stat_ch, genet_tick, sc);
	timeout_set(&sc->sc_rxto, genet_rxtick, sc);

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = genet_start;
	ifp->if_ioctl = genet_ioctl;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);

	/* 802.1Q VLAN-sized frames are supported */
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Attach MII driver */
	ifmedia_init(&mii->mii_media, 0, genet_media_change, genet_media_status);
	mii->mii_ifp = ifp;
	mii->mii_readreg = genet_mii_readreg;
	mii->mii_writereg = genet_mii_writereg;
	mii->mii_statchg = genet_mii_statchg;
	mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* No PHY: fall back to a manual placeholder media. */
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	}
	/*
	 * NOTE(review): this also runs in the no-PHY case above, where
	 * only IFM_MANUAL was added — verify ifmedia_set(IFM_AUTO) is
	 * safe when no IFM_AUTO entry exists.
	 */
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach interface */
	if_attach(ifp);

	/* Attach ethernet interface */
	ether_ifattach(ifp);

	return 0;
}
1007 
1008 void
1009 genet_lladdr_read(struct genet_softc *sc, uint8_t *lladdr)
1010 {
1011 	uint32_t maclo, machi;
1012 
1013 	maclo = RD4(sc, GENET_UMAC_MAC0);
1014 	machi = RD4(sc, GENET_UMAC_MAC1);
1015 
1016 	lladdr[0] = (maclo >> 24) & 0xff;
1017 	lladdr[1] = (maclo >> 16) & 0xff;
1018 	lladdr[2] = (maclo >> 8) & 0xff;
1019 	lladdr[3] = (maclo >> 0) & 0xff;
1020 	lladdr[4] = (machi >> 8) & 0xff;
1021 	lladdr[5] = (machi >> 0) & 0xff;
1022 }
1023