xref: /openbsd-src/sys/dev/fdt/if_mvneta.c (revision ae3cb403620ab940fbaabb3055fac045a63d56b7)
1 /*	$OpenBSD: if_mvneta.c,v 1.3 2017/10/05 06:32:26 patrick Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 
31 #include <sys/param.h>
32 #include <sys/device.h>
33 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/errno.h>
36 #include <sys/kernel.h>
37 #include <sys/mutex.h>
38 #include <sys/socket.h>
39 #include <sys/sockio.h>
40 #include <uvm/uvm_extern.h>
41 #include <sys/mbuf.h>
42 
43 #include <machine/bus.h>
44 #include <machine/fdt.h>
45 
46 #include <dev/ofw/openfirm.h>
47 #include <dev/ofw/ofw_clock.h>
48 #include <dev/ofw/ofw_pinctrl.h>
49 #include <dev/ofw/fdt.h>
50 
51 #include <dev/fdt/if_mvnetareg.h>
52 #include <dev/fdt/mvmdiovar.h>
53 
54 #ifdef __armv7__
55 #include <armv7/marvell/mvmbusvar.h>
56 #endif
57 
58 #include <net/if.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 
62 #include <net/bpf.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/if_ether.h>
66 
67 #include <dev/mii/mii.h>
68 #include <dev/mii/miivar.h>
69 
70 #if NBPFILTER > 0
71 #include <net/bpf.h>
72 #endif
73 
74 #ifdef MVNETA_DEBUG
75 #define DPRINTF(x)	if (mvneta_debug) printf x
76 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
77 int mvneta_debug = MVNETA_DEBUG;
78 #else
79 #define DPRINTF(x)
80 #define DPRINTFN(n,x)
81 #endif
82 
83 #define MVNETA_READ(sc, reg) \
84 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
85 #define MVNETA_WRITE(sc, reg, val) \
86 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
87 #define MVNETA_READ_FILTER(sc, reg, val, c) \
88 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
89 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
90 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
91 
92 #define MVNETA_LINKUP_READ(sc) \
93 	MVNETA_READ(sc, MVNETA_PS0)
94 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
95 
96 #define MVNETA_TX_RING_CNT	256
97 #define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
98 #define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
99 #define MVNETA_TX_QUEUE_CNT	1
100 #define MVNETA_RX_RING_CNT	256
101 #define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
102 #define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
103 #define MVNETA_RX_QUEUE_CNT	1
104 
105 CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
106 	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
107 CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
108 	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);
109 
110 #define MVNETA_NTXSEG		30
111 
112 struct mvneta_dmamem {
113 	bus_dmamap_t		mdm_map;
114 	bus_dma_segment_t	mdm_seg;
115 	size_t			mdm_size;
116 	caddr_t			mdm_kva;
117 };
118 #define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
119 #define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
120 #define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
121 #define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
122 
123 struct mvneta_buf {
124 	bus_dmamap_t	tb_map;
125 	struct mbuf	*tb_m;
126 };
127 
128 struct mvneta_softc {
129 	struct device sc_dev;
130 	struct device *sc_mdio;
131 
132 	bus_space_tag_t sc_iot;
133 	bus_space_handle_t sc_ioh;
134 	bus_dma_tag_t sc_dmat;
135 
136 	struct arpcom sc_ac;
137 #define sc_enaddr sc_ac.ac_enaddr
138 	struct mii_data sc_mii;
139 #define sc_media sc_mii.mii_media
140 
141 	struct timeout sc_tick_ch;
142 
143 	struct mvneta_dmamem	*sc_txring;
144 	struct mvneta_buf	*sc_txbuf;
145 	struct mvneta_tx_desc	*sc_txdesc;
146 	int			 sc_tx_prod;	/* next free tx desc */
147 	int			 sc_tx_cnt;	/* amount of tx sent */
148 	int			 sc_tx_cons;	/* first tx desc sent */
149 
150 	struct mvneta_dmamem	*sc_rxring;
151 	struct mvneta_buf	*sc_rxbuf;
152 	struct mvneta_rx_desc	*sc_rxdesc;
153 	int			 sc_rx_prod;	/* next rx desc to fill */
154 	struct if_rxring	 sc_rx_ring;
155 	int			 sc_rx_cons;	/* next rx desc recvd */
156 
157 	enum {
158 		PHY_MODE_QSGMII,
159 		PHY_MODE_SGMII,
160 		PHY_MODE_RGMII,
161 		PHY_MODE_RGMII_ID,
162 	}			 sc_phy_mode;
163 	int			 sc_fixed_link;
164 	int			 sc_phy;
165 	int			 sc_link;
166 };
167 
168 
169 int mvneta_miibus_readreg(struct device *, int, int);
170 void mvneta_miibus_writereg(struct device *, int, int, int);
171 void mvneta_miibus_statchg(struct device *);
172 
173 void mvneta_wininit(struct mvneta_softc *);
174 
175 /* Gigabit Ethernet Port part functions */
176 int mvneta_match(struct device *, void *, void *);
177 void mvneta_attach(struct device *, struct device *, void *);
178 void mvneta_attach_deferred(struct device *);
179 
180 void mvneta_tick(void *);
181 int mvneta_intr(void *);
182 
183 void mvneta_start(struct ifnet *);
184 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
185 void mvneta_port_up(struct mvneta_softc *);
186 int mvneta_up(struct mvneta_softc *);
187 void mvneta_down(struct mvneta_softc *);
188 void mvneta_watchdog(struct ifnet *);
189 
190 int mvneta_mediachange(struct ifnet *);
191 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
192 
193 int mvneta_encap(struct mvneta_softc *, struct mbuf *, uint32_t *);
194 void mvneta_rx_proc(struct mvneta_softc *);
195 void mvneta_tx_proc(struct mvneta_softc *);
196 uint8_t mvneta_crc8(const uint8_t *, size_t);
197 void mvneta_iff(struct mvneta_softc *);
198 
199 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
200     bus_size_t, bus_size_t);
201 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
202 void mvneta_fill_rx_ring(struct mvneta_softc *);
203 
204 struct cfdriver mvneta_cd = {
205 	NULL, "mvneta", DV_IFNET
206 };
207 
208 struct cfattach mvneta_ca = {
209 	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
210 };
211 
212 int
213 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
214 {
215 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
216 	return mvmdio_miibus_readreg(sc->sc_mdio, phy, reg);
217 }
218 
219 void
220 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
221 {
222 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
223 	return mvmdio_miibus_writereg(sc->sc_mdio, phy, reg, val);
224 }
225 
/*
 * MII status change callback.  Mirrors the negotiated media into the
 * MAC's port auto-negotiation control register (PANC) and, on a link
 * transition, forces the MAC link state to match and (re)enables the
 * port queues via mvneta_port_up().
 */
void
mvneta_miibus_statchg(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *)self;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);

		/* Clear the speed/duplex override bits before re-deriving
		 * them from the active media word. */
		panc &= ~(MVNETA_PANC_SETMIISPEED |
			  MVNETA_PANC_SETGMIISPEED |
			  MVNETA_PANC_SETFULLDX);

		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			panc |= MVNETA_PANC_SETGMIISPEED;
			break;
		case IFM_100_TX:
			panc |= MVNETA_PANC_SETMIISPEED;
			break;
		case IFM_10_T:
			/* 10Mbit/s: neither speed override bit is set. */
			break;
		}

		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
			panc |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, panc);
	}

	/* Detect a link state transition (cached in sc_link). */
	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
		sc->sc_link = !sc->sc_link;

		if (sc->sc_link) {
			/* Link came up: force link pass, enable queues. */
			uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
			panc &= ~MVNETA_PANC_FORCELINKFAIL;
			panc |= MVNETA_PANC_FORCELINKPASS;
			MVNETA_WRITE(sc, MVNETA_PANC, panc);
			mvneta_port_up(sc);
		} else {
			/* Link went down: force link fail. */
			uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
			panc &= ~MVNETA_PANC_FORCELINKPASS;
			panc |= MVNETA_PANC_FORCELINKFAIL;
			MVNETA_WRITE(sc, MVNETA_PANC, panc);
		}
	}
}
275 
276 void
277 mvneta_enaddr_write(struct mvneta_softc *sc)
278 {
279 	uint32_t maddrh, maddrl;
280 	maddrh  = sc->sc_enaddr[0] << 24;
281 	maddrh |= sc->sc_enaddr[1] << 16;
282 	maddrh |= sc->sc_enaddr[2] << 8;
283 	maddrh |= sc->sc_enaddr[3];
284 	maddrl  = sc->sc_enaddr[4] << 8;
285 	maddrl |= sc->sc_enaddr[5];
286 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
287 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
288 }
289 
/*
 * Program the MBUS address decoding windows so the controller's DMA
 * engine can reach DRAM.  Only compiled on armv7, where the window
 * layout comes from the global mvmbus DRAM info; a no-op elsewhere.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
#ifdef __armv7__
	uint32_t en;
	int i;

	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);

	/* Start from a clean slate: zero every window (and remap regs). */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* All windows disabled until programmed below. */
	en = MVNETA_BARE_EN_MASK;

	/* One window per DRAM chip select; clear its disable bit. */
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}

	MVNETA_WRITE(sc, MVNETA_BARE, en);
#endif
}
326 
327 int
328 mvneta_match(struct device *parent, void *cfdata, void *aux)
329 {
330 	struct fdt_attach_args *faa = aux;
331 
332 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta");
333 }
334 
335 void
336 mvneta_attach(struct device *parent, struct device *self, void *aux)
337 {
338 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
339 	struct fdt_attach_args *faa = aux;
340 	struct ifnet *ifp;
341 	int i, len, node;
342 	char *phy_mode;
343 
344 	printf("\n");
345 
346 	sc->sc_iot = faa->fa_iot;
347 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
348 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
349 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
350 		printf("%s: cannot map registers\n", self->dv_xname);
351 		return;
352 	}
353 	sc->sc_dmat = faa->fa_dmat;
354 
355 	clock_enable(faa->fa_node, NULL);
356 
357 	pinctrl_byname(faa->fa_node, "default");
358 
359 	len = OF_getproplen(faa->fa_node, "phy-mode");
360 	if (len <= 0) {
361 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
362 		return;
363 	}
364 
365 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
366 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
367 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
368 		sc->sc_phy_mode = PHY_MODE_QSGMII;
369 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
370 		sc->sc_phy_mode = PHY_MODE_SGMII;
371 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
372 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
373 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
374 		sc->sc_phy_mode = PHY_MODE_RGMII;
375 	else {
376 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
377 		    phy_mode);
378 		return;
379 	}
380 	free(phy_mode, M_TEMP, len);
381 
382 	/* TODO: check child's name to be "fixed-link" */
383 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
384 	    OF_child(faa->fa_node))
385 		sc->sc_fixed_link = 1;
386 
387 	if (!sc->sc_fixed_link) {
388 		node = OF_getnodebyphandle(OF_getpropint(faa->fa_node,
389 		    "phy", 0));
390 		if (!node) {
391 			printf("%s: cannot find phy in fdt\n", self->dv_xname);
392 			return;
393 		}
394 
395 		if ((sc->sc_phy = OF_getpropint(node, "reg", -1)) == -1) {
396 			printf("%s: cannot extract phy addr\n", self->dv_xname);
397 			return;
398 		}
399 	}
400 
401 	mvneta_wininit(sc);
402 
403 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
404 	    ETHER_ADDR_LEN) {
405 		OF_getprop(faa->fa_node, "local-mac-address",
406 		    sc->sc_enaddr, ETHER_ADDR_LEN);
407 		mvneta_enaddr_write(sc);
408 	} else {
409 		uint32_t maddrh, maddrl;
410 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
411 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
412 		if (maddrh || maddrl) {
413 			sc->sc_enaddr[0] = maddrh >> 24;
414 			sc->sc_enaddr[1] = maddrh >> 16;
415 			sc->sc_enaddr[2] = maddrh >> 8;
416 			sc->sc_enaddr[3] = maddrh >> 0;
417 			sc->sc_enaddr[4] = maddrl >> 8;
418 			sc->sc_enaddr[5] = maddrl >> 0;
419 		} else
420 			ether_fakeaddr(&sc->sc_ac.ac_if);
421 	}
422 
423 	printf("%s: Ethernet address %s\n", self->dv_xname,
424 	    ether_sprintf(sc->sc_enaddr));
425 
426 	/* disable port */
427 	MVNETA_WRITE(sc, MVNETA_PMACC0,
428 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
429 	delay(200);
430 
431 	/* clear all cause registers */
432 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
433 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
434 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
435 
436 	/* mask all interrupts */
437 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
438 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
439 	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
440 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
441 
442 	/* enable MBUS Retry bit16 */
443 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
444 
445 	/* enable access for CPU0 */
446 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
447 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
448 
449 	/* reset RX and TX DMAs */
450 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
451 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
452 
453 	/* disable legacy WRR, disable EJP, release from reset */
454 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
455 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
456 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
457 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
458 	}
459 
460 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
461 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
462 
463 	/* set port acceleration mode */
464 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
465 
466 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
467 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
468 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
469 
470 	/* Set SDC register except IPGINT bits */
471 	MVNETA_WRITE(sc, MVNETA_SDC,
472 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
473 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
474 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
475 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
476 
477 	/* XXX: Disable PHY polling in hardware */
478 	MVNETA_WRITE(sc, MVNETA_EUC,
479 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
480 
481 	/* Disable Auto-Negotiation */
482 	MVNETA_WRITE(sc, MVNETA_PANC,
483 	    MVNETA_READ(sc, MVNETA_PANC) & ~(MVNETA_PANC_INBANDANEN |
484 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_ANDUPLEXEN));
485 	MVNETA_WRITE(sc, MVNETA_OMSCD,
486 	    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
487 	MVNETA_WRITE(sc, MVNETA_PMACC2,
488 	    MVNETA_READ(sc, MVNETA_PMACC2) & ~MVNETA_PMACC2_INBANDAN);
489 
490 	/* clear uni-/multicast tables */
491 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
492 	memset(dfut, 0, sizeof(dfut));
493 	memset(dfsmt, 0, sizeof(dfut));
494 	memset(dfomt, 0, sizeof(dfut));
495 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
496 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
497 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
498 
499 	MVNETA_WRITE(sc, MVNETA_PIE,
500 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
501 
502 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
503 
504 	/* Setup phy. */
505 	uint32_t ctrl = MVNETA_READ(sc, MVNETA_PMACC2);
506 	switch (sc->sc_phy_mode) {
507 	case PHY_MODE_QSGMII:
508 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
509 		    MVNETA_SERDESCFG_QSGMII_PROTO);
510 		ctrl |= MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN;
511 		break;
512 	case PHY_MODE_SGMII:
513 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
514 		    MVNETA_SERDESCFG_SGMII_PROTO);
515 		ctrl |= MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN;
516 		break;
517 	case PHY_MODE_RGMII:
518 	case PHY_MODE_RGMII_ID:
519 		ctrl |= MVNETA_PMACC2_RGMIIEN;
520 		break;
521 	}
522 
523 	ctrl &= ~MVNETA_PMACC2_PORTMACRESET;
524 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctrl);
525 
526 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
527 		;
528 
529 	arm_intr_establish_fdt(faa->fa_node, IPL_NET, mvneta_intr, sc,
530 	    sc->sc_dev.dv_xname);
531 
532 	ifp = &sc->sc_ac.ac_if;
533 	ifp->if_softc = sc;
534 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
535 	ifp->if_start = mvneta_start;
536 	ifp->if_ioctl = mvneta_ioctl;
537 	ifp->if_watchdog = mvneta_watchdog;
538 	ifp->if_capabilities = IFCAP_VLAN_MTU;
539 
540 #if notyet
541 	/*
542 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
543 	 */
544 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
545 				IFCAP_CSUM_UDPv4;
546 
547 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
548 	/*
549 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
550 	 */
551 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
552 #endif
553 
554 	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
555 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
556 
557 	/*
558 	 * Do MII setup.
559 	 */
560 	sc->sc_mii.mii_ifp = ifp;
561 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
562 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
563 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
564 
565 	ifmedia_init(&sc->sc_mii.mii_media, 0,
566 	    mvneta_mediachange, mvneta_mediastatus);
567 
568 	if (!sc->sc_fixed_link) {
569 		extern void *mvmdio_sc;
570 		sc->sc_mdio = mvmdio_sc;
571 
572 		if (sc->sc_mdio == NULL) {
573 			config_defer(self, mvneta_attach_deferred);
574 			return;
575 		}
576 
577 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phy,
578 		    MII_OFFSET_ANY, 0);
579 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
580 			printf("%s: no PHY found!\n", self->dv_xname);
581 			ifmedia_add(&sc->sc_mii.mii_media,
582 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
583 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
584 		} else
585 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
586 	} else {
587 		ifmedia_add(&sc->sc_mii.mii_media,
588 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
589 		ifmedia_set(&sc->sc_mii.mii_media,
590 		    IFM_ETHER|IFM_MANUAL);
591 
592 		sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
593 		sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
594 		mvneta_miibus_statchg(self);
595 
596 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
597 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
598 	}
599 
600 	/*
601 	 * Call MI attach routines.
602 	 */
603 	if_attach(ifp);
604 	ether_ifattach(ifp);
605 
606 	return;
607 }
608 
609 void
610 mvneta_attach_deferred(struct device *self)
611 {
612 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
613 	struct ifnet *ifp = &sc->sc_ac.ac_if;
614 
615 	extern void *mvmdio_sc;
616 	sc->sc_mdio = mvmdio_sc;
617 	if (sc->sc_mdio == NULL) {
618 		printf("%s: mdio bus not yet attached\n", self->dv_xname);
619 		return;
620 	}
621 
622 	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phy,
623 	    MII_OFFSET_ANY, 0);
624 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
625 		printf("%s: no PHY found!\n", self->dv_xname);
626 		ifmedia_add(&sc->sc_mii.mii_media,
627 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
628 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
629 	} else
630 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
631 
632 	/*
633 	 * Call MI attach routines.
634 	 */
635 	if_attach(ifp);
636 	ether_ifattach(ifp);
637 
638 	return;
639 }
640 
641 void
642 mvneta_tick(void *arg)
643 {
644 	struct mvneta_softc *sc = arg;
645 	struct mii_data *mii = &sc->sc_mii;
646 	int s;
647 
648 	s = splnet();
649 	mii_tick(mii);
650 	splx(s);
651 
652 	timeout_add(&sc->sc_tick_ch, hz);
653 }
654 
655 int
656 mvneta_intr(void *arg)
657 {
658 	struct mvneta_softc *sc = arg;
659 	struct ifnet *ifp = &sc->sc_ac.ac_if;
660 	uint32_t ic;
661 
662 	if (!(ifp->if_flags & IFF_RUNNING))
663 		return 1;
664 
665 	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
666 
667 	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
668 		mvneta_tx_proc(sc);
669 
670 	if (ic & MVNETA_PRXTXTI_RBICTAPQ(0))
671 		mvneta_rx_proc(sc);
672 
673 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
674 		mvneta_start(ifp);
675 
676 	return 1;
677 }
678 
/*
 * Transmit start routine.  Dequeues mbufs from the send queue and
 * encapsulates them onto the TX descriptor ring until the ring fills
 * or the queue drains.  Uses the ifq_deq_begin/commit/rollback
 * protocol so a packet that does not fit is left on the queue.
 */
void
mvneta_start(struct ifnet *ifp)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	DPRINTFN(3, ("mvneta_start (idx %d)\n", sc->sc_tx_prod));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc))
		return;

	/* Sync the whole ring before touching descriptors. */
	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_tx_prod;
	while (sc->sc_tx_cnt < MVNETA_TX_RING_CNT) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvneta_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	/* Only publish the new producer index if something was queued. */
	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
742 
743 int
744 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
745 {
746 	struct mvneta_softc *sc = ifp->if_softc;
747 	struct ifreq *ifr = (struct ifreq *)addr;
748 	int s, error = 0;
749 
750 	s = splnet();
751 
752 	switch (cmd) {
753 	case SIOCSIFADDR:
754 		ifp->if_flags |= IFF_UP;
755 		/* FALLTHROUGH */
756 	case SIOCSIFFLAGS:
757 		if (ifp->if_flags & IFF_UP) {
758 			if (ifp->if_flags & IFF_RUNNING)
759 				error = ENETRESET;
760 			else
761 				mvneta_up(sc);
762 		} else {
763 			if (ifp->if_flags & IFF_RUNNING)
764 				mvneta_down(sc);
765 		}
766 		break;
767 	case SIOCGIFMEDIA:
768 	case SIOCSIFMEDIA:
769 		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
770 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
771 		break;
772 	case SIOCGIFRXR:
773 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
774 		    NULL, MCLBYTES, &sc->sc_rx_ring);
775 		break;
776 	default:
777 		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
778 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
779 		break;
780 	}
781 
782 	if (error == ENETRESET) {
783 		if (ifp->if_flags & IFF_RUNNING)
784 			mvneta_iff(sc);
785 		error = 0;
786 	}
787 
788 	splx(s);
789 
790 	return error;
791 }
792 
/*
 * Enable the RX and TX queues (queue 0 only) once the link is up.
 * Called from mvneta_up() and from the statchg callback on a
 * link-up transition.
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
800 
/*
 * Bring the interface up: allocate and program the TX/RX descriptor
 * rings and their DMA maps, fill the RX ring with mbufs, enable the
 * port and its interrupts, and start the PHY tick timer.
 * Always returns 0 (allocations use M_WAITOK).
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One DMA map per TX slot; up to MVNETA_NTXSEG segments each. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* RX buffers are single-segment cluster mbufs. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	/* With a fixed link sc_link was set at attach time. */
	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM,
	    MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0));

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
900 
/*
 * Bring the interface down: stop the RX/TX queues, wait for the
 * hardware to quiesce (bounded busy-wait loops), disable the port and
 * its interrupts, free all ring buffers and DMA resources, and reset
 * the DMA engines.  Inverse of mvneta_up().
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	/* Let any in-flight frame drain before disabling the port. */
	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
	MVNETA_WRITE(sc, MVNETA_PMIC, 0);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
	MVNETA_WRITE(sc, MVNETA_PMIM, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
1027 
1028 void
1029 mvneta_watchdog(struct ifnet *ifp)
1030 {
1031 	struct mvneta_softc *sc = ifp->if_softc;
1032 
1033 	/*
1034 	 * Reclaim first as there is a possibility of losing Tx completion
1035 	 * interrupts.
1036 	 */
1037 	mvneta_tx_proc(sc);
1038 	if (sc->sc_tx_cnt != 0) {
1039 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1040 
1041 		ifp->if_oerrors++;
1042 	}
1043 }
1044 
1045 /*
1046  * Set media options.
1047  */
1048 int
1049 mvneta_mediachange(struct ifnet *ifp)
1050 {
1051 	struct mvneta_softc *sc = ifp->if_softc;
1052 
1053 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1054 		mii_mediachg(&sc->sc_mii);
1055 
1056 	return (0);
1057 }
1058 
1059 /*
1060  * Report current media status.
1061  */
1062 void
1063 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1064 {
1065 	struct mvneta_softc *sc = ifp->if_softc;
1066 
1067 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1068 		mii_pollstat(&sc->sc_mii);
1069 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1070 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1071 	}
1072 
1073 	if (sc->sc_fixed_link) {
1074 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1075 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1076 	}
1077 }
1078 
/*
 * Map mbuf chain m for DMA and post one Tx descriptor per segment,
 * starting at ring index *idx.  On success the ring takes ownership of
 * m (freed in mvneta_tx_proc once transmitted), *idx is advanced past
 * the descriptors consumed and the hardware is notified; on failure
 * ENOBUFS is returned and the caller keeps ownership of m.
 */
int
mvneta_encap(struct mvneta_softc *sc, struct mbuf *m, uint32_t *idx)
{
	struct mvneta_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t cmdsts;
	int i, current, first, last;

	DPRINTFN(3, ("mvneta_encap\n"));

	first = last = current = *idx;
	/* Borrow the spare DMA map parked at the first slot. */
	map = sc->sc_txbuf[current].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Keep two descriptors of slack so the producer index can never
	 * catch up with the consumer index on a full ring.
	 */
	if (map->dm_nsegs > (MVNETA_TX_RING_CNT - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	DPRINTFN(2, ("mvneta_encap: dm_nsegs=%d\n", map->dm_nsegs));

	/* No checksum offload yet; see the disabled block below. */
	cmdsts = MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	/* One descriptor per DMA segment, flagged FIRST/LAST as needed. */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[current];
		memset(txd, 0, sizeof(*txd));
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->cmdsts = cmdsts |
		    MVNETA_TX_ZERO_PADDING;
		if (i == 0)
		    txd->cmdsts |= MVNETA_TX_FIRST_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->cmdsts |= MVNETA_TX_LAST_DESC;

		/* Flush the descriptor before the chip may read it. */
		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = MVNETA_TX_RING_NEXT(current);
		KASSERT(current != sc->sc_tx_cons);
	}

	/*
	 * Park the last slot's spare map at the first slot and record
	 * the loaded map and mbuf at the last slot, so mvneta_tx_proc
	 * unloads and frees them only when the whole chain completes.
	 */
	KASSERT(sc->sc_txbuf[last].tb_m == NULL);
	sc->sc_txbuf[first].tb_map = sc->sc_txbuf[last].tb_map;
	sc->sc_txbuf[last].tb_map = map;
	sc->sc_txbuf[last].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = current;

	/* Let him know we sent another packet. */
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), map->dm_nsegs);

	DPRINTFN(3, ("mvneta_encap: completed successfully\n"));

	return 0;
}
1160 
1161 void
1162 mvneta_rx_proc(struct mvneta_softc *sc)
1163 {
1164 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1165 	struct mvneta_rx_desc *rxd;
1166 	struct mvneta_buf *rxb;
1167 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1168 	struct mbuf *m;
1169 	uint32_t rxstat;
1170 	int i, idx, len, ready;
1171 
1172 	DPRINTFN(3, ("%s: %d\n", __func__, sc->sc_rx_cons));
1173 
1174 	if (!(ifp->if_flags & IFF_RUNNING))
1175 		return;
1176 
1177 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring), 0,
1178 	    MVNETA_DMA_LEN(sc->sc_rxring),
1179 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1180 
1181 	ready = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
1182 	MVNETA_WRITE(sc, MVNETA_PRXSU(0), ready);
1183 
1184 	for (i = 0; i < ready; i++) {
1185 		idx = sc->sc_rx_cons;
1186 		KASSERT(idx < MVNETA_RX_RING_CNT);
1187 
1188 		rxd = &sc->sc_rxdesc[idx];
1189 
1190 #ifdef DIAGNOSTIC
1191 		if ((rxd->cmdsts &
1192 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC)) !=
1193 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC))
1194 			panic("%s: buffer size is smaller than packet",
1195 			    __func__);
1196 #endif
1197 
1198 		len = rxd->bytecnt;
1199 		rxb = &sc->sc_rxbuf[idx];
1200 		KASSERT(rxb->tb_m);
1201 
1202 		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
1203 		    len, BUS_DMASYNC_POSTREAD);
1204 		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
1205 
1206 		m = rxb->tb_m;
1207 		rxb->tb_m = NULL;
1208 		m->m_pkthdr.len = m->m_len = len;
1209 
1210 		rxstat = rxd->cmdsts;
1211 		if (rxstat & MVNETA_ERROR_SUMMARY) {
1212 #if 0
1213 			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;
1214 
1215 			if (err == MVNETA_RX_CRC_ERROR)
1216 				ifp->if_ierrors++;
1217 			if (err == MVNETA_RX_OVERRUN_ERROR)
1218 				ifp->if_ierrors++;
1219 			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
1220 				ifp->if_ierrors++;
1221 			if (err == MVNETA_RX_RESOURCE_ERROR)
1222 				ifp->if_ierrors++;
1223 #else
1224 			ifp->if_ierrors++;
1225 #endif
1226 			panic("%s: handle input errors", __func__);
1227 			continue;
1228 		}
1229 
1230 #if notyet
1231 		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
1232 			int flgs = 0;
1233 
1234 			/* Check IPv4 header checksum */
1235 			flgs |= M_CSUM_IPv4;
1236 			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
1237 				flgs |= M_CSUM_IPv4_BAD;
1238 			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
1239 				/*
1240 				 * Check TCPv4/UDPv4 checksum for
1241 				 * non-fragmented packet only.
1242 				 *
1243 				 * It seemd that sometimes
1244 				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
1245 				 * even if the checksum is correct and the
1246 				 * packet was not fragmented. So we don't set
1247 				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
1248 				 */
1249 
1250 				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1251 					MVNETA_RX_L4_TYPE_TCP) &&
1252 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1253 					flgs |= M_CSUM_TCPv4;
1254 				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1255 					MVNETA_RX_L4_TYPE_UDP) &&
1256 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1257 					flgs |= M_CSUM_UDPv4;
1258 			}
1259 			m->m_pkthdr.csum_flags = flgs;
1260 		}
1261 #endif
1262 
1263 		/* Skip on first 2byte (HW header) */
1264 		m_adj(m, MVNETA_HWHEADER_SIZE);
1265 
1266 		ml_enqueue(&ml, m);
1267 
1268 		if_rxr_put(&sc->sc_rx_ring, 1);
1269 
1270 		sc->sc_rx_cons = MVNETA_RX_RING_NEXT(idx);
1271 	}
1272 
1273 	mvneta_fill_rx_ring(sc);
1274 
1275 	if_input(ifp, &ml);
1276 }
1277 
/*
 * Reclaim completed Tx descriptors: unload and free the mbufs, update
 * error counters from the descriptor status and advance the consumer
 * index.  Called from interrupt context and from the watchdog.
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	int i, idx, sent;

	DPRINTFN(3, ("%s\n", __func__));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	/* Number of transmitted buffers; acknowledge them to the chip. */
	sent = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NORB(sent));

	for (i = 0; i < sent; i++) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < MVNETA_TX_RING_CNT);

		txd = &sc->sc_txdesc[idx];
		txb = &sc->sc_txbuf[idx];
		/* Only a chain's last slot carries the mbuf and map. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		/* Descriptors were freed; transmission may resume. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			/* sic: "ERRO" is the register macro's spelling */
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		sc->sc_tx_cons = MVNETA_TX_RING_NEXT(sc->sc_tx_cons);
	}

	/* Everything reclaimed; disarm the transmit watchdog. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1334 
1335 uint8_t
1336 mvneta_crc8(const uint8_t *data, size_t size)
1337 {
1338 	int bit;
1339 	uint8_t byte;
1340 	uint8_t crc = 0;
1341 	const uint8_t poly = 0x07;
1342 
1343 	while(size--)
1344 	  for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
1345 	    crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
1346 
1347 	return crc;
1348 }
1349 
1350 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1351 
/*
 * Program the receive filter: promiscuous/allmulti modes and the
 * unicast (DFUT), special multicast (DFSMT) and other multicast
 * (DFOMT) tables.  Each 32-bit table entry holds four 8-bit filter
 * slots, hence the i>>2 / i&3 indexing below.
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	/* 01:00:5e:00:00:xx -- IPv4 multicast prefix the chip special-cases */
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	/* Start from a clean slate: no broadcast reject, no promisc. */
	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Pass every multicast slot; promisc also passes unicast. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				/* DFSMT is indexed by the last address byte */
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				/* everything else hashes into DFOMT by CRC-8 */
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1411 
/*
 * Allocate a zeroed, single-segment DMA memory chunk of the given size
 * and alignment, map it into kernel VA (coherent) and load it into its
 * own DMA map.  Returns NULL on failure; release with
 * mvneta_dmamem_free().
 */
struct mvneta_dmamem *
mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvneta_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

	/* Error unwind, in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
1452 
/*
 * Release a chunk obtained from mvneta_dmamem_alloc(): unmap the KVA,
 * free the physical segment, destroy the map, free the descriptor.
 */
void
mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
1461 
1462 struct mbuf *
1463 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1464 {
1465 	struct mbuf *m = NULL;
1466 
1467 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1468 	if (!m)
1469 		return (NULL);
1470 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1471 
1472 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1473 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1474 		m_freem(m);
1475 		return (NULL);
1476 	}
1477 
1478 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1479 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1480 
1481 	return (m);
1482 }
1483 
/*
 * Refill the Rx ring with fresh cluster mbufs for as many slots as the
 * if_rxr accounting permits, handing each new descriptor to the chip
 * as it is prepared.  Stops early (without error) if mbufs run out.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_RX_RING_CNT);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		memset(rxd, 0, sizeof(*rxd));
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;

		/* Flush the descriptor before the chip may read it. */
		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
		    sc->sc_rx_prod * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_PREWRITE);

		sc->sc_rx_prod = MVNETA_RX_RING_NEXT(sc->sc_rx_prod);

		/* Tell him that there's a new free desc. */
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOOFNEWDESCRIPTORS(1));
	}

	/* Return the slots we could not fill. */
	if_rxr_put(&sc->sc_rx_ring, slots);
}
1515