xref: /openbsd-src/sys/dev/fdt/if_mvneta.c (revision 78fec973f57e9fc9edd564490c79661460ad807b)
1 /*	$OpenBSD: if_mvneta.c,v 1.26 2022/06/05 02:54:18 dlg Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 #include "kstat.h"
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/systm.h>
35 #include <sys/endian.h>
36 #include <sys/errno.h>
37 #include <sys/kernel.h>
38 #include <sys/mutex.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <uvm/uvm_extern.h>
42 #include <sys/mbuf.h>
43 #include <sys/kstat.h>
44 
45 #include <machine/bus.h>
46 #include <machine/cpufunc.h>
47 #include <machine/fdt.h>
48 
49 #include <dev/ofw/openfirm.h>
50 #include <dev/ofw/ofw_clock.h>
51 #include <dev/ofw/ofw_misc.h>
52 #include <dev/ofw/ofw_pinctrl.h>
53 #include <dev/ofw/fdt.h>
54 
55 #include <dev/fdt/if_mvnetareg.h>
56 
57 #ifdef __armv7__
58 #include <armv7/marvell/mvmbusvar.h>
59 #endif
60 
61 #include <net/if.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 
65 #include <net/bpf.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/if_ether.h>
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #if NBPFILTER > 0
74 #include <net/bpf.h>
75 #endif
76 
77 #ifdef MVNETA_DEBUG
78 #define DPRINTF(x)	if (mvneta_debug) printf x
79 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
80 int mvneta_debug = MVNETA_DEBUG;
81 #else
82 #define DPRINTF(x)
83 #define DPRINTFN(n,x)
84 #endif
85 
86 #define MVNETA_READ(sc, reg) \
87 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
88 #define MVNETA_WRITE(sc, reg, val) \
89 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
90 #define MVNETA_READ_FILTER(sc, reg, val, c) \
91 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
92 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
93 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
94 
95 #define MVNETA_LINKUP_READ(sc) \
96 	MVNETA_READ(sc, MVNETA_PS0)
97 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
98 
99 #define MVNETA_TX_RING_CNT	256
100 #define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
101 #define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
102 #define MVNETA_TX_QUEUE_CNT	1
103 #define MVNETA_RX_RING_CNT	256
104 #define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
105 #define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
106 #define MVNETA_RX_QUEUE_CNT	1
107 
108 CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
109 	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
110 CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
111 	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);
112 
113 #define MVNETA_NTXSEG		30
114 
115 struct mvneta_dmamem {
116 	bus_dmamap_t		mdm_map;
117 	bus_dma_segment_t	mdm_seg;
118 	size_t			mdm_size;
119 	caddr_t			mdm_kva;
120 };
121 #define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
122 #define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
123 #define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
124 #define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
125 
126 struct mvneta_buf {
127 	bus_dmamap_t	tb_map;
128 	struct mbuf	*tb_m;
129 };
130 
131 struct mvneta_softc {
132 	struct device sc_dev;
133 	struct mii_bus *sc_mdio;
134 
135 	bus_space_tag_t sc_iot;
136 	bus_space_handle_t sc_ioh;
137 	bus_dma_tag_t sc_dmat;
138 	void *sc_ih;
139 
140 	uint64_t		sc_clk_freq;
141 
142 	struct arpcom sc_ac;
143 #define sc_enaddr sc_ac.ac_enaddr
144 	struct mii_data sc_mii;
145 #define sc_media sc_mii.mii_media
146 
147 	struct timeout sc_tick_ch;
148 
149 	struct mvneta_dmamem	*sc_txring;
150 	struct mvneta_buf	*sc_txbuf;
151 	struct mvneta_tx_desc	*sc_txdesc;
152 	unsigned int		 sc_tx_prod;	/* next free tx desc */
153 	unsigned int		 sc_tx_cons;	/* first tx desc sent */
154 
155 	struct mvneta_dmamem	*sc_rxring;
156 	struct mvneta_buf	*sc_rxbuf;
157 	struct mvneta_rx_desc	*sc_rxdesc;
158 	unsigned int		 sc_rx_prod;	/* next rx desc to fill */
159 	unsigned int		 sc_rx_cons;	/* next rx desc recvd */
160 	struct if_rxring	 sc_rx_ring;
161 
162 	enum {
163 		PHY_MODE_QSGMII,
164 		PHY_MODE_SGMII,
165 		PHY_MODE_RGMII,
166 		PHY_MODE_RGMII_ID,
167 		PHY_MODE_1000BASEX,
168 		PHY_MODE_2500BASEX,
169 	}			 sc_phy_mode;
170 	int			 sc_fixed_link;
171 	int			 sc_inband_status;
172 	int			 sc_phy;
173 	int			 sc_phyloc;
174 	int			 sc_link;
175 	int			 sc_sfp;
176 	int			 sc_node;
177 
178 #if NKSTAT > 0
179 	struct mutex		 sc_kstat_lock;
180 	struct timeout		 sc_kstat_tick;
181 	struct kstat		*sc_kstat;
182 #endif
183 };
184 
185 
186 int mvneta_miibus_readreg(struct device *, int, int);
187 void mvneta_miibus_writereg(struct device *, int, int, int);
188 void mvneta_miibus_statchg(struct device *);
189 
190 void mvneta_wininit(struct mvneta_softc *);
191 
192 /* Gigabit Ethernet Port part functions */
193 int mvneta_match(struct device *, void *, void *);
194 void mvneta_attach(struct device *, struct device *, void *);
195 void mvneta_attach_deferred(struct device *);
196 
197 void mvneta_tick(void *);
198 int mvneta_intr(void *);
199 
200 void mvneta_start(struct ifqueue *);
201 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
202 void mvneta_inband_statchg(struct mvneta_softc *);
203 void mvneta_port_change(struct mvneta_softc *);
204 void mvneta_port_up(struct mvneta_softc *);
205 int mvneta_up(struct mvneta_softc *);
206 void mvneta_down(struct mvneta_softc *);
207 void mvneta_watchdog(struct ifnet *);
208 
209 int mvneta_mediachange(struct ifnet *);
210 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
211 
212 void mvneta_rx_proc(struct mvneta_softc *);
213 void mvneta_tx_proc(struct mvneta_softc *);
214 uint8_t mvneta_crc8(const uint8_t *, size_t);
215 void mvneta_iff(struct mvneta_softc *);
216 
217 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
218     bus_size_t, bus_size_t);
219 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
220 void mvneta_fill_rx_ring(struct mvneta_softc *);
221 
222 #if NKSTAT > 0
223 void		mvneta_kstat_attach(struct mvneta_softc *);
224 #endif
225 
226 static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");
227 
228 struct cfdriver mvneta_cd = {
229 	NULL, "mvneta", DV_IFNET
230 };
231 
232 const struct cfattach mvneta_ca = {
233 	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
234 };
235 
236 int
237 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
238 {
239 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
240 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
241 }
242 
243 void
244 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
245 {
246 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
247 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
248 }
249 
/*
 * MII status-change callback: mirror the PHY's negotiated speed and
 * duplex into the port auto-negotiation control register (PANC), then
 * let mvneta_port_change() react to any link transition.
 */
void
mvneta_miibus_statchg(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *)self;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);

		/* Clear the forced speed/duplex bits before re-deriving them. */
		panc &= ~(MVNETA_PANC_SETMIISPEED |
			  MVNETA_PANC_SETGMIISPEED |
			  MVNETA_PANC_SETFULLDX);

		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			panc |= MVNETA_PANC_SETGMIISPEED;
			break;
		case IFM_100_TX:
			panc |= MVNETA_PANC_SETMIISPEED;
			break;
		case IFM_10_T:
			/* 10Mbit/s: neither speed bit set. */
			break;
		}

		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
			panc |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, panc);
	}

	mvneta_port_change(sc);
}
284 
/*
 * Derive media status from the port status register when link state is
 * managed in-band (no MDIO PHY to poll).
 */
void
mvneta_inband_statchg(struct mvneta_softc *sc)
{
	/* For fixed 1000/2500base-X modes, keep the previously set subtype. */
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	reg = MVNETA_READ(sc, MVNETA_PS0);
	if (reg & MVNETA_PS0_LINKUP)
		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
	if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
		sc->sc_mii.mii_media_active |= subtype;
	else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
		sc->sc_mii.mii_media_active |= subtype;
	else if (reg & MVNETA_PS0_GMIISPEED)
		sc->sc_mii.mii_media_active |= IFM_1000_T;
	else if (reg & MVNETA_PS0_MIISPEED)
		sc->sc_mii.mii_media_active |= IFM_100_TX;
	else
		sc->sc_mii.mii_media_active |= IFM_10_T;
	if (reg & MVNETA_PS0_FULLDX)
		sc->sc_mii.mii_media_active |= IFM_FDX;

	mvneta_port_change(sc);
}
312 
313 void
314 mvneta_enaddr_write(struct mvneta_softc *sc)
315 {
316 	uint32_t maddrh, maddrl;
317 	maddrh  = sc->sc_enaddr[0] << 24;
318 	maddrh |= sc->sc_enaddr[1] << 16;
319 	maddrh |= sc->sc_enaddr[2] << 8;
320 	maddrh |= sc->sc_enaddr[3];
321 	maddrl  = sc->sc_enaddr[4] << 8;
322 	maddrl |= sc->sc_enaddr[5];
323 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
324 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
325 }
326 
/*
 * Program the controller's address decoding windows so it can DMA
 * to/from DRAM.  On armv7 one window is set up per DRAM chip select
 * described by the mbus glue; elsewhere a single window 0 is opened.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
	uint32_t en;
	int i;

#ifdef __armv7__
	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);
#endif

	/* Zero every window (and every remap register) first. */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	en = MVNETA_BARE_EN_MASK;

#ifdef __armv7__
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}
#else
	MVNETA_WRITE(sc, MVNETA_S(0), MVNETA_S_SIZE(0));
	en &= ~(1 << 0);
#endif

	/* Commit the mask; a bit was cleared above for each window in use. */
	MVNETA_WRITE(sc, MVNETA_BARE, en);
}
368 
/* SMC (secure monitor) SiP service calls used to configure the COMPHY. */
#define COMPHY_SIP_POWER_ON	0x82000001
#define COMPHY_SIP_POWER_OFF	0x82000002
#define COMPHY_SPEED(x)		((x) << 2)
#define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
#define  COMPHY_SPEED_2_5G		1
#define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
#define  COMPHY_SPEED_5G		3
#define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
#define  COMPHY_SPEED_6G		5
#define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
#define COMPHY_UNIT(x)		((x) << 8)
#define COMPHY_MODE(x)		((x) << 12)
#define  COMPHY_MODE_SATA		1
#define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
#define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
#define  COMPHY_MODE_USB3H		4
#define  COMPHY_MODE_USB3D		5
#define  COMPHY_MODE_PCIE		6
#define  COMPHY_MODE_RXAUI		7
#define  COMPHY_MODE_XFI		8
#define  COMPHY_MODE_SFI		9
#define  COMPHY_MODE_USB3		10

/*
 * Power on the shared COMPHY SerDes lane for SGMII / 1000base-X /
 * 2500base-X links via a secure-monitor call.  The FDT "phys"
 * property supplies <phandle, unit>; the lane number comes from the
 * referenced phy node's "reg".  Other PHY modes need no COMPHY setup.
 */
void
mvneta_comphy_init(struct mvneta_softc *sc)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	/* Nothing to do if there is no (well-formed) "phys" property. */
	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		return;
	}

	smc_call(COMPHY_SIP_POWER_ON, lane, mode, 0);
}
426 
427 int
428 mvneta_match(struct device *parent, void *cfdata, void *aux)
429 {
430 	struct fdt_attach_args *faa = aux;
431 
432 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
433 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
434 }
435 
436 void
437 mvneta_attach(struct device *parent, struct device *self, void *aux)
438 {
439 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
440 	struct fdt_attach_args *faa = aux;
441 	uint32_t ctl0, ctl2, ctl4, panc;
442 	struct ifnet *ifp;
443 	int i, len, node;
444 	char *phy_mode;
445 	char *managed;
446 
447 	sc->sc_iot = faa->fa_iot;
448 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
449 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
450 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
451 		printf("%s: cannot map registers\n", self->dv_xname);
452 		return;
453 	}
454 	sc->sc_dmat = faa->fa_dmat;
455 	sc->sc_node = faa->fa_node;
456 
457 	clock_enable(faa->fa_node, NULL);
458 	sc->sc_clk_freq = clock_get_frequency_idx(faa->fa_node, 0);
459 
460 	pinctrl_byname(faa->fa_node, "default");
461 
462 	len = OF_getproplen(faa->fa_node, "phy-mode");
463 	if (len <= 0) {
464 		printf(": cannot extract phy-mode\n");
465 		return;
466 	}
467 
468 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
469 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
470 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
471 		sc->sc_phy_mode = PHY_MODE_QSGMII;
472 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
473 		sc->sc_phy_mode = PHY_MODE_SGMII;
474 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
475 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
476 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
477 		sc->sc_phy_mode = PHY_MODE_RGMII;
478 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
479 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
480 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
481 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
482 	else {
483 		printf(": cannot use phy-mode %s\n", phy_mode);
484 		return;
485 	}
486 	free(phy_mode, M_TEMP, len);
487 
488 	/* TODO: check child's name to be "fixed-link" */
489 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
490 	    OF_child(faa->fa_node))
491 		sc->sc_fixed_link = 1;
492 
493 	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
494 		managed = malloc(len, M_TEMP, M_WAITOK);
495 		OF_getprop(faa->fa_node, "managed", managed, len);
496 		if (!strncmp(managed, "in-band-status",
497 		    strlen("in-band-status"))) {
498 			sc->sc_fixed_link = 1;
499 			sc->sc_inband_status = 1;
500 		}
501 		free(managed, M_TEMP, len);
502 	}
503 
504 	if (!sc->sc_fixed_link) {
505 		sc->sc_phy = OF_getpropint(faa->fa_node, "phy", 0);
506 		node = OF_getnodebyphandle(sc->sc_phy);
507 		if (!node) {
508 			printf(": cannot find phy in fdt\n");
509 			return;
510 		}
511 
512 		if ((sc->sc_phyloc = OF_getpropint(node, "reg", -1)) == -1) {
513 			printf(": cannot extract phy addr\n");
514 			return;
515 		}
516 	}
517 
518 	mvneta_wininit(sc);
519 
520 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
521 	    ETHER_ADDR_LEN) {
522 		OF_getprop(faa->fa_node, "local-mac-address",
523 		    sc->sc_enaddr, ETHER_ADDR_LEN);
524 		mvneta_enaddr_write(sc);
525 	} else {
526 		uint32_t maddrh, maddrl;
527 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
528 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
529 		if (maddrh || maddrl) {
530 			sc->sc_enaddr[0] = maddrh >> 24;
531 			sc->sc_enaddr[1] = maddrh >> 16;
532 			sc->sc_enaddr[2] = maddrh >> 8;
533 			sc->sc_enaddr[3] = maddrh >> 0;
534 			sc->sc_enaddr[4] = maddrl >> 8;
535 			sc->sc_enaddr[5] = maddrl >> 0;
536 		} else
537 			ether_fakeaddr(&sc->sc_ac.ac_if);
538 	}
539 
540 	sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0);
541 
542 	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
543 
544 	/* disable port */
545 	MVNETA_WRITE(sc, MVNETA_PMACC0,
546 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
547 	delay(200);
548 
549 	/* clear all cause registers */
550 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
551 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
552 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
553 
554 	/* mask all interrupts */
555 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
556 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
557 	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
558 	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
559 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
560 
561 	/* enable MBUS Retry bit16 */
562 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
563 
564 	/* enable access for CPU0 */
565 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
566 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
567 
568 	/* reset RX and TX DMAs */
569 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
570 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
571 
572 	/* disable legacy WRR, disable EJP, release from reset */
573 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
574 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
575 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
576 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
577 	}
578 
579 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
580 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
581 
582 	/* set port acceleration mode */
583 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
584 
585 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
586 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
587 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
588 
589 	/* Set SDC register except IPGINT bits */
590 	MVNETA_WRITE(sc, MVNETA_SDC,
591 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
592 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
593 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
594 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
595 
596 	/* XXX: Disable PHY polling in hardware */
597 	MVNETA_WRITE(sc, MVNETA_EUC,
598 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
599 
600 	/* clear uni-/multicast tables */
601 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
602 	memset(dfut, 0, sizeof(dfut));
603 	memset(dfsmt, 0, sizeof(dfut));
604 	memset(dfomt, 0, sizeof(dfut));
605 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
606 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
607 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
608 
609 	MVNETA_WRITE(sc, MVNETA_PIE,
610 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
611 
612 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
613 
614 	/* Setup phy. */
615 	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
616 	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
617 	ctl4 = MVNETA_READ(sc, MVNETA_PMACC4);
618 	panc = MVNETA_READ(sc, MVNETA_PANC);
619 
620 	/* Force link down to change in-band settings. */
621 	panc &= ~MVNETA_PANC_FORCELINKPASS;
622 	panc |= MVNETA_PANC_FORCELINKFAIL;
623 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
624 
625 	mvneta_comphy_init(sc);
626 
627 	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
628 	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
629 	ctl4 &= ~(MVNETA_PMACC4_SHORT_PREAMBLE);
630 	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
631 	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
632 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
633 	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
634 	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);
635 
636 	ctl2 |= MVNETA_PMACC2_RGMIIEN;
637 	switch (sc->sc_phy_mode) {
638 	case PHY_MODE_QSGMII:
639 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
640 		    MVNETA_SERDESCFG_QSGMII_PROTO);
641 		ctl2 |= MVNETA_PMACC2_PCSEN;
642 		break;
643 	case PHY_MODE_SGMII:
644 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
645 		    MVNETA_SERDESCFG_SGMII_PROTO);
646 		ctl2 |= MVNETA_PMACC2_PCSEN;
647 		break;
648 	case PHY_MODE_1000BASEX:
649 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
650 		    MVNETA_SERDESCFG_SGMII_PROTO);
651 		ctl2 |= MVNETA_PMACC2_PCSEN;
652 		break;
653 	case PHY_MODE_2500BASEX:
654 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
655 		    MVNETA_SERDESCFG_HSGMII_PROTO);
656 		ctl2 |= MVNETA_PMACC2_PCSEN;
657 		ctl4 |= MVNETA_PMACC4_SHORT_PREAMBLE;
658 		break;
659 	default:
660 		break;
661 	}
662 
663 	/* Use Auto-Negotiation for Inband Status only */
664 	if (sc->sc_inband_status) {
665 		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
666 		    MVNETA_PANC_FORCELINKPASS);
667 		/* TODO: read mode from SFP */
668 		if (1) {
669 			/* 802.3z */
670 			ctl0 |= MVNETA_PMACC0_PORTTYPE;
671 			panc |= (MVNETA_PANC_INBANDANEN |
672 			    MVNETA_PANC_SETGMIISPEED |
673 			    MVNETA_PANC_SETFULLDX);
674 		} else {
675 			/* SGMII */
676 			ctl2 |= MVNETA_PMACC2_INBANDAN;
677 			panc |= (MVNETA_PANC_INBANDANEN |
678 			    MVNETA_PANC_ANSPEEDEN |
679 			    MVNETA_PANC_ANDUPLEXEN);
680 		}
681 		MVNETA_WRITE(sc, MVNETA_OMSCD,
682 		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
683 	} else {
684 		MVNETA_WRITE(sc, MVNETA_OMSCD,
685 		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
686 	}
687 
688 	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
689 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
690 	MVNETA_WRITE(sc, MVNETA_PMACC4, ctl4);
691 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
692 
693 	/* Port reset */
694 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
695 		;
696 
697 	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
698 	    mvneta_intr, sc, sc->sc_dev.dv_xname);
699 
700 	ifp = &sc->sc_ac.ac_if;
701 	ifp->if_softc = sc;
702 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
703 	ifp->if_xflags = IFXF_MPSAFE;
704 	ifp->if_qstart = mvneta_start;
705 	ifp->if_ioctl = mvneta_ioctl;
706 	ifp->if_watchdog = mvneta_watchdog;
707 	ifp->if_capabilities = IFCAP_VLAN_MTU;
708 
709 #if notyet
710 	/*
711 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
712 	 */
713 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
714 				IFCAP_CSUM_UDPv4;
715 
716 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
717 	/*
718 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
719 	 */
720 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
721 #endif
722 
723 	ifq_set_maxlen(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
724 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
725 
726 	/*
727 	 * Do MII setup.
728 	 */
729 	sc->sc_mii.mii_ifp = ifp;
730 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
731 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
732 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
733 
734 	ifmedia_init(&sc->sc_mii.mii_media, 0,
735 	    mvneta_mediachange, mvneta_mediastatus);
736 
737 	config_defer(self, mvneta_attach_deferred);
738 }
739 
/*
 * Deferred attach: runs once all drivers have had a chance to attach,
 * so the MDIO bus referenced by "phy" exists.  Wires up the MII PHY
 * (or a fixed/in-band link) and registers the interface.
 */
void
mvneta_attach_deferred(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (!sc->sc_fixed_link) {
		sc->sc_mdio = mii_byphandle(sc->sc_phy);
		if (sc->sc_mdio == NULL) {
			printf("%s: mdio bus not yet attached\n", self->dv_xname);
			return;
		}

		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
		    MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", self->dv_xname);
			ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	} else {
		/* Fixed link: a single "auto" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

		if (sc->sc_inband_status) {
			/*
			 * Seed media_active with the link subtype so
			 * mvneta_inband_statchg() can preserve it.
			 */
			switch (sc->sc_phy_mode) {
			case PHY_MODE_1000BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
				break;
			case PHY_MODE_2500BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
				break;
			default:
				break;
			}
			mvneta_inband_statchg(sc);
		} else {
			/* No link status source: assume 1000baseT FDX up. */
			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
			mvneta_miibus_statchg(self);
		}

		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NKSTAT > 0
	mvneta_kstat_attach(sc);
#endif
}
800 
801 void
802 mvneta_tick(void *arg)
803 {
804 	struct mvneta_softc *sc = arg;
805 	struct mii_data *mii = &sc->sc_mii;
806 	int s;
807 
808 	s = splnet();
809 	mii_tick(mii);
810 	splx(s);
811 
812 	timeout_add_sec(&sc->sc_tick_ch, 1);
813 }
814 
/*
 * Interrupt handler.  Reads the summary cause register and dispatches
 * misc (link) events, then tx completions and rx packets for queue 0.
 */
int
mvneta_intr(void *arg)
{
	struct mvneta_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t ic, misc;

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);

	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
		/* Link-change handling runs under the kernel lock. */
		KERNEL_LOCK();
		misc = MVNETA_READ(sc, MVNETA_PMIC);
		MVNETA_WRITE(sc, MVNETA_PMIC, 0);
		if (sc->sc_inband_status && (misc &
		    (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE |
		    MVNETA_PMI_PSCSYNCCHNG))) {
			mvneta_inband_statchg(sc);
		}
		KERNEL_UNLOCK();
	}

	/* Don't touch the rings unless the interface is up. */
	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 1;

	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
		mvneta_tx_proc(sc);

	if (ISSET(ic, MVNETA_PRXTXTI_RBICTAPQ(0) | MVNETA_PRXTXTI_RDTAQ(0)))
		mvneta_rx_proc(sc);

	return 1;
}
848 
849 static inline int
850 mvneta_load_mbuf(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m)
851 {
852 	int error;
853 
854 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
855 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
856 	switch (error) {
857 	case EFBIG:
858 		error = m_defrag(m, M_DONTWAIT);
859 		if (error != 0)
860 			break;
861 
862 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
863 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
864 		if (error != 0)
865 			break;
866 
867 		/* FALLTHROUGH */
868 	case 0:
869 		return (0);
870 
871 	default:
872 		break;
873 	}
874 
875         return (error);
876 }
877 
/*
 * Fill tx descriptors for an already DMA-loaded mbuf, starting at ring
 * slot "prod".  The first descriptor carries the command flags; the
 * final descriptor additionally gets MVNETA_TX_LAST_DESC.
 */
static inline void
mvneta_encap(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m,
    unsigned int prod)
{
	struct mvneta_tx_desc *txd;
	uint32_t cmdsts;
	unsigned int i;

	cmdsts = MVNETA_TX_FIRST_DESC | MVNETA_TX_ZERO_PADDING |
	    MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[prod];
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->l4ichk = 0;
		txd->cmdsts = cmdsts;
		txd->nextdescptr = 0;
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->_padding[0] = 0;
		txd->_padding[1] = 0;
		txd->_padding[2] = 0;
		txd->_padding[3] = 0;

		prod = MVNETA_TX_RING_NEXT(prod);
		/* Only the first segment carries the command flags. */
		cmdsts = 0;
	}
	/* txd still points at the last descriptor written in the loop. */
	txd->cmdsts |= MVNETA_TX_LAST_DESC;
}
923 
924 static inline void
925 mvneta_sync_txring(struct mvneta_softc *sc, int ops)
926 {
927 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
928 	    MVNETA_DMA_LEN(sc->sc_txring), ops);
929 }
930 
/*
 * Transmit start routine (MPSAFE, per-ifqueue).  Dequeues mbufs, DMA
 * loads them, fills tx descriptors and notifies the hardware of new
 * descriptors via PTXSU, batching the updates because that register
 * only accepts up to MVNETA_PTXSU_MAX descriptors per write.
 */
void
mvneta_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct mvneta_softc *sc = ifp->if_softc;
	unsigned int prod, nprod, free, used = 0, nused;
	struct mbuf *m;
	bus_dmamap_t map;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc)) {
		ifq_purge(ifq);
		return;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_tx_prod;
	free = MVNETA_TX_RING_CNT - (prod - sc->sc_tx_cons);

	for (;;) {
		/* Stop while a maximally fragmented packet might not fit. */
		if (free < MVNETA_NTXSEG - 1) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		map = sc->sc_txbuf[prod].tb_map;
		if (mvneta_load_mbuf(sc, map, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++; /* XXX atomic */
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		mvneta_encap(sc, map, m, prod);

		/*
		 * Move the loaded map (and the mbuf) to the slot one
		 * past this packet's final descriptor, recycling that
		 * slot's spare map back to the old prod position.
		 */
		nprod = (prod + map->dm_nsegs) % MVNETA_TX_RING_CNT;
		sc->sc_txbuf[prod].tb_map = sc->sc_txbuf[nprod].tb_map;
		prod = nprod;
		sc->sc_txbuf[prod].tb_map = map;
		sc->sc_txbuf[prod].tb_m = m;

		free -= map->dm_nsegs;

		/* Flush a batch if the pending count would overflow PTXSU. */
		nused = used + map->dm_nsegs;
		if (nused > MVNETA_PTXSU_MAX) {
			mvneta_sync_txring(sc,
			    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NOWD(used));
			used = map->dm_nsegs;
		} else
			used = nused;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = prod;
	/* Tell the hardware about any remaining new descriptors. */
	if (used)
		MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NOWD(used));
}
1003 
/*
 * Interface ioctl handler: up/down, media, rx-ring info and SFP page
 * access; everything else falls through to ether_ioctl().  Runs at
 * splnet; ENETRESET from any case triggers a filter reprogram.
 */
int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvneta_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvneta_down(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;
	case SIOCGIFSFFPAGE:
		/* Serialize SFP page reads across all mvneta instances. */
		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvneta_sff_lock);
		break;
	default:
		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvneta_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}
1061 
1062 void
1063 mvneta_port_change(struct mvneta_softc *sc)
1064 {
1065 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
1066 		sc->sc_link = !sc->sc_link;
1067 
1068 		if (sc->sc_link) {
1069 			if (!sc->sc_inband_status) {
1070 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1071 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
1072 				panc |= MVNETA_PANC_FORCELINKPASS;
1073 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1074 			}
1075 			mvneta_port_up(sc);
1076 		} else {
1077 			if (!sc->sc_inband_status) {
1078 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1079 				panc &= ~MVNETA_PANC_FORCELINKPASS;
1080 				panc |= MVNETA_PANC_FORCELINKFAIL;
1081 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1082 			}
1083 		}
1084 	}
1085 }
1086 
/*
 * Enable the RX and TX engines for queue 0 (the only queue this driver
 * programs).  Called when the link comes up (mvneta_port_change) and
 * from mvneta_up() if the link is already up.
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
1094 
/*
 * Bring the interface up: allocate TX/RX descriptor rings and per-slot
 * DMA maps, program the ring and queue registers, enable the MAC,
 * program the address filters and interrupt masks, and mark the
 * interface IFF_RUNNING.  Always returns 0 (all allocations here use
 * M_WAITOK/BUS_DMA_WAITOK).
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One map per TX slot; a packet may span up to MVNETA_NTXSEG segs. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* RX buffers are single-segment mbuf clusters. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	/* Ring size plus RX buffer size in 8-byte units (field at bit 19). */
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));

	if (sc->sc_clk_freq != 0) {
		/*
		 * Use the Non Occupied Descriptors Threshold to
		 * interrupt when the descriptors granted by rxr are
		 * used up, otherwise wait until the RX Interrupt
		 * Time Threshold is reached.
		 */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0),
		    MVNETA_PRXDQTH_ODT(MVNETA_RX_RING_CNT) |
		    MVNETA_PRXDQTH_NODT(2));
		/* ~250us interrupt moderation (clock ticks / 4000). */
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), sc->sc_clk_freq / 4000);
	} else {
		/* Time based moderation is hard without a clock */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), 0);
	}

	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT) |
	    MVNETA_PTXDQS_TBT(MIN(MVNETA_TX_RING_CNT / 2, ifp->if_txmit)));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	/* Let the stack hand out RX slots, then populate the ring. */
	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	/* Program our MAC address into the unicast match registers. */
	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_RDTAQ(0) |
	    MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	/* Start the once-a-second housekeeping timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1214 
/*
 * Take the interface down: stop the tick timer, wait for RX/TX queue
 * activity to drain (bounded busy-wait), disable the MAC, mask and
 * clear all interrupts, free every ring buffer/map plus the rings
 * themselves, and finally reset the RX/TX DMA engines.
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);
	ifp->if_flags &= ~IFF_RUNNING;
	/* Make sure no interrupt handler is still running before teardown. */
	intr_barrier(sc->sc_ih);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	/* Let any in-flight transfers settle before disabling the port. */
	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifq_clr_oactive(&ifp->if_snd);
}
1340 
1341 void
1342 mvneta_watchdog(struct ifnet *ifp)
1343 {
1344 	struct mvneta_softc *sc = ifp->if_softc;
1345 
1346 	/*
1347 	 * Reclaim first as there is a possibility of losing Tx completion
1348 	 * interrupts.
1349 	 */
1350 	mvneta_tx_proc(sc);
1351 	if (sc->sc_tx_prod != sc->sc_tx_cons) {
1352 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1353 
1354 		ifp->if_oerrors++;
1355 	}
1356 }
1357 
1358 /*
1359  * Set media options.
1360  */
1361 int
1362 mvneta_mediachange(struct ifnet *ifp)
1363 {
1364 	struct mvneta_softc *sc = ifp->if_softc;
1365 
1366 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1367 		mii_mediachg(&sc->sc_mii);
1368 
1369 	return (0);
1370 }
1371 
1372 /*
1373  * Report current media status.
1374  */
1375 void
1376 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1377 {
1378 	struct mvneta_softc *sc = ifp->if_softc;
1379 
1380 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1381 		mii_pollstat(&sc->sc_mii);
1382 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1383 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1384 	}
1385 
1386 	if (sc->sc_fixed_link) {
1387 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1388 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1389 	}
1390 }
1391 
/*
 * RX completion: harvest the number of occupied descriptors the chip
 * reports, hand good frames to the stack in a single mbuf list and
 * count errored ones, and acknowledge processed descriptors back to
 * the hardware in batches of at most MVNETA_PRXSU_MAX.
 */
void
mvneta_rx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t rxstat;
	unsigned int i, done, cons;

	/* How many descriptors the hardware has filled since last time. */
	done = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTREAD);

	cons = sc->sc_rx_cons;

	for (i = 0; i < done; i++) {
		rxd = &sc->sc_rxdesc[cons];
		rxb = &sc->sc_rxbuf[cons];

		/* Take the mbuf out of the slot; the refill replaces it. */
		m = rxb->tb_m;
		rxb->tb_m = NULL;

		/* m_pkthdr.len is still MCLBYTES from mvneta_alloc_mbuf(). */
		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		rxstat = rxd->cmdsts;
		if (rxstat & MVNETA_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;

			if (err == MVNETA_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			m_freem(m);
		} else {
			/* Trim the Marvell hardware header off the front. */
			m->m_pkthdr.len = m->m_len = rxd->bytecnt;
			m_adj(m, MVNETA_HWHEADER_SIZE);

			ml_enqueue(&ml, m);
		}

#if notyet
		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
			int flgs = 0;

			/* Check IPv4 header checksum */
			flgs |= M_CSUM_IPv4;
			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
				flgs |= M_CSUM_IPv4_BAD;
			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
				/*
				 * Check TCPv4/UDPv4 checksum for
				 * non-fragmented packet only.
				 *
				 * It seemd that sometimes
				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
				 * even if the checksum is correct and the
				 * packet was not fragmented. So we don't set
				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
				 */

				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_TCP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_TCPv4;
				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_UDP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_UDPv4;
			}
			m->m_pkthdr.csum_flags = flgs;
		}
#endif

		if_rxr_put(&sc->sc_rx_ring, 1);

		cons = MVNETA_RX_RING_NEXT(cons);

		/*
		 * The status update register can only acknowledge up to
		 * MVNETA_PRXSU_MAX descriptors at once.  Ack a full batch
		 * here; the current iteration carries over because the
		 * loop's i++ advances the reset counter to 1.
		 */
		if (i == MVNETA_PRXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PRXSU(0),
			    MVNETA_PRXSU_NOPD(MVNETA_PRXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PRXSU_MAX;
			i = 0;
		}
	}

	sc->sc_rx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREREAD);

	/* Acknowledge the remainder that didn't fill a whole batch. */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOPD(i));
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	mvneta_fill_rx_ring(sc);
}
1509 
/*
 * TX completion: free the mbufs and unload the DMA maps of descriptors
 * the hardware has transmitted, record per-descriptor error counters,
 * and acknowledge released buffers to the chip in batches of at most
 * MVNETA_PTXSU_MAX.  Restarts the send queue if it was marked oactive.
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ifqueue *ifq = &ifp->if_snd;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	unsigned int i, cons, done;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/* Number of descriptors the hardware has finished sending. */
	done = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	cons = sc->sc_tx_cons;

	for (i = 0; i < done; i++) {
		txd = &sc->sc_txdesc[cons];
		txb = &sc->sc_txbuf[cons];

		/* Only the slot holding a packet's last segment has tb_m. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		cons = MVNETA_TX_RING_NEXT(cons);

		/*
		 * The status update register can only release up to
		 * MVNETA_PTXSU_MAX buffers at once; ack a full batch and
		 * restart the count (the current iteration is carried
		 * over by the loop's i++).
		 */
		if (i == MVNETA_PTXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NORB(MVNETA_PTXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PTXSU_MAX;
			i = 0;
		}
	}

	sc->sc_tx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_PREREAD);

	/* Release whatever is left over from the last full batch. */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PTXSU(0),
		    MVNETA_PTXSU_NORB(i));
	}
	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);
}
1581 
/*
 * CRC-8 over `size' bytes of `data': polynomial x^8 + x^2 + x + 1
 * (0x07), MSB first, initial value 0, no final XOR.  Used to hash
 * multicast addresses into the "other" filter table.
 */
uint8_t
mvneta_crc8(const uint8_t *data, size_t size)
{
	const uint8_t poly = 0x07;
	uint8_t crc = 0;
	size_t i;
	int bit;

	for (i = 0; i < size; i++) {
		/* Folding the whole byte in up front is equivalent to
		 * feeding its bits one at a time, MSB first. */
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x80)
				crc = (crc << 1) ^ poly;
			else
				crc <<= 1;
		}
	}

	return crc;
}
1596 
/* The special and other multicast tables are filled in lock step below. */
CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);

/*
 * Program the RX address filters: promiscuous/allmulti policy in the
 * port configuration register, the unicast table (indexed by the last
 * nibble of our MAC address), and the two multicast tables - "special"
 * for addresses with the 01:00:5e:00:00:xx prefix (indexed by the last
 * byte) and "other" (indexed by CRC-8 of the full address).
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	/* IPv4 multicast prefix the chip filters via the special table. */
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	/* Clear broadcast-reject and unicast-promiscuous bits. */
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Accept all multicast (and, if PROMISC, all unicast). */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1658 
1659 struct mvneta_dmamem *
1660 mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
1661 {
1662 	struct mvneta_dmamem *mdm;
1663 	int nsegs;
1664 
1665 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
1666 	mdm->mdm_size = size;
1667 
1668 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1669 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
1670 		goto mdmfree;
1671 
1672 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
1673 	    &nsegs, BUS_DMA_WAITOK) != 0)
1674 		goto destroy;
1675 
1676 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1677 	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
1678 		goto free;
1679 
1680 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1681 	    NULL, BUS_DMA_WAITOK) != 0)
1682 		goto unmap;
1683 
1684 	bzero(mdm->mdm_kva, size);
1685 
1686 	return (mdm);
1687 
1688 unmap:
1689 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1690 free:
1691 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1692 destroy:
1693 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1694 mdmfree:
1695 	free(mdm, M_DEVBUF, 0);
1696 
1697 	return (NULL);
1698 }
1699 
1700 void
1701 mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
1702 {
1703 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1704 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1705 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1706 	free(mdm, M_DEVBUF, 0);
1707 }
1708 
1709 static inline struct mbuf *
1710 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1711 {
1712 	struct mbuf *m = NULL;
1713 
1714 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1715 	if (m == NULL)
1716 		return (NULL);
1717 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1718 
1719 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1720 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1721 		m_freem(m);
1722 		return (NULL);
1723 	}
1724 
1725 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1726 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1727 
1728 	return (m);
1729 }
1730 
/*
 * Refill the RX ring with fresh mbuf clusters, up to the number of
 * slots the rxr accounting grants (capped at MVNETA_PRXSU_MAX, the
 * most one status-update write can report), then tell the chip how
 * many new descriptors became available.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	unsigned int slots, used = 0;
	unsigned int prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_rx_prod;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_PRXSU_MAX);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* Hand the descriptor to the chip with a clean status. */
		rxd = &sc->sc_rxdesc[prod];
		rxd->cmdsts = 0;
		rxd->bufsize = 0;
		rxd->bytecnt = 0;
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->nextdescptr = 0;
		rxd->_padding[0] = 0;
		rxd->_padding[1] = 0;
		rxd->_padding[2] = 0;
		rxd->_padding[3] = 0;

		prod = MVNETA_RX_RING_NEXT(prod);
		used++;
	}
	/* Return the slots we could not fill. */
	if_rxr_put(&sc->sc_rx_ring, slots);

	sc->sc_rx_prod = prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREWRITE);

	/* Tell the hardware about the newly available descriptors. */
	if (used > 0)
		MVNETA_WRITE(sc, MVNETA_PRXSU(0), MVNETA_PRXSU_NOND(used));
}
1775 
1776 #if NKSTAT > 0
1777 
/*
 * This is used to sort and look up the array of kstats quickly.
 * Each enumerator is an index into mvneta_counters[] and into the
 * kstat_kv array built in mvneta_kstat_attach(); keep them in sync
 * (enforced by the CTASSERT after the table).
 */
enum mvneta_stat {
	mvneta_stat_good_octets_received,
	mvneta_stat_bad_octets_received,
	mvneta_stat_good_frames_received,
	mvneta_stat_mac_trans_error,
	mvneta_stat_bad_frames_received,
	mvneta_stat_broadcast_frames_received,
	mvneta_stat_multicast_frames_received,
	mvneta_stat_frames_64_octets,
	mvneta_stat_frames_65_to_127_octets,
	mvneta_stat_frames_128_to_255_octets,
	mvneta_stat_frames_256_to_511_octets,
	mvneta_stat_frames_512_to_1023_octets,
	mvneta_stat_frames_1024_to_max_octets,
	mvneta_stat_good_octets_sent,
	mvneta_stat_good_frames_sent,
	mvneta_stat_excessive_collision,
	mvneta_stat_multicast_frames_sent,
	mvneta_stat_broadcast_frames_sent,
	mvneta_stat_unrecog_mac_control_received,
	mvneta_stat_good_fc_received,
	mvneta_stat_bad_fc_received,
	mvneta_stat_undersize,
	mvneta_stat_fc_sent,
	mvneta_stat_fragments,
	mvneta_stat_oversize,
	mvneta_stat_jabber,
	mvneta_stat_mac_rcv_error,
	mvneta_stat_bad_crc,
	mvneta_stat_collisions,
	mvneta_stat_late_collisions,

	/* Port-level discard/overrun counters, outside the MIB block. */
	mvneta_stat_port_discard,
	mvneta_stat_port_overrun,

	mvnet_stat_count	/* number of counters, not a counter itself */
};
1816 
/* One hardware counter: its kstat name and unit, plus register offset. */
struct mvneta_counter {
	const char		 *name;	/* kstat key name */
	enum kstat_kv_unit	 unit;	/* bytes/packets/none */
	bus_size_t		 reg;	/* register offset; 0 = 64-bit special */
};
1822 
/*
 * Counter table, indexed by enum mvneta_stat.  A reg of 0x0 marks the
 * two 64-bit octet counters, which mvneta_kstat_read() reads specially
 * as low/high register pairs.
 */
static const struct mvneta_counter mvneta_counters[] = {
	[mvneta_stat_good_octets_received] =
	    { "rx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_bad_octets_received] =
	    { "rx bad",		KSTAT_KV_U_BYTES,	0x3008 },
	[mvneta_stat_good_frames_received] =
	    { "rx good",	KSTAT_KV_U_PACKETS,	0x3010 },
	[mvneta_stat_mac_trans_error] =
	    { "tx mac error",	KSTAT_KV_U_PACKETS,	0x300c },
	[mvneta_stat_bad_frames_received] =
	    { "rx bad",		KSTAT_KV_U_PACKETS,	0x3014 },
	[mvneta_stat_broadcast_frames_received] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS,	0x3018 },
	[mvneta_stat_multicast_frames_received] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS,	0x301c },
	[mvneta_stat_frames_64_octets] =
	    { "64B",		KSTAT_KV_U_PACKETS,	0x3020 },
	[mvneta_stat_frames_65_to_127_octets] =
	    { "65-127B",	KSTAT_KV_U_PACKETS,	0x3024 },
	[mvneta_stat_frames_128_to_255_octets] =
	    { "128-255B",	KSTAT_KV_U_PACKETS,	0x3028 },
	[mvneta_stat_frames_256_to_511_octets] =
	    { "256-511B",	KSTAT_KV_U_PACKETS,	0x302c },
	[mvneta_stat_frames_512_to_1023_octets] =
	    { "512-1023B",	KSTAT_KV_U_PACKETS,	0x3030 },
	[mvneta_stat_frames_1024_to_max_octets] =
	    { "1024-maxB",	KSTAT_KV_U_PACKETS,	0x3034 },
	[mvneta_stat_good_octets_sent] =
	    { "tx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_good_frames_sent] =
	    { "tx good",	KSTAT_KV_U_PACKETS,	0x3040 },
	[mvneta_stat_excessive_collision] =
	    { "tx excess coll",	KSTAT_KV_U_PACKETS,	0x3044 },
	[mvneta_stat_multicast_frames_sent] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS,	0x3048 },
	[mvneta_stat_broadcast_frames_sent] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS,	0x304c },
	[mvneta_stat_unrecog_mac_control_received] =
	    { "rx unknown fc",	KSTAT_KV_U_PACKETS,	0x3050 },
	[mvneta_stat_good_fc_received] =
	    { "rx fc good",	KSTAT_KV_U_PACKETS,	0x3058 },
	[mvneta_stat_bad_fc_received] =
	    { "rx fc bad",	KSTAT_KV_U_PACKETS,	0x305c },
	[mvneta_stat_undersize] =
	    { "rx undersize",	KSTAT_KV_U_PACKETS,	0x3060 },
	[mvneta_stat_fc_sent] =
	    { "tx fc",		KSTAT_KV_U_PACKETS,	0x3054 },
	[mvneta_stat_fragments] =
	    { "rx fragments",	KSTAT_KV_U_NONE,	0x3064 },
	[mvneta_stat_oversize] =
	    { "rx oversize",	KSTAT_KV_U_PACKETS,	0x3068 },
	[mvneta_stat_jabber] =
	    { "rx jabber",	KSTAT_KV_U_PACKETS,	0x306c },
	[mvneta_stat_mac_rcv_error] =
	    { "rx mac errors",	KSTAT_KV_U_PACKETS,	0x3070 },
	[mvneta_stat_bad_crc] =
	    { "rx bad crc",	KSTAT_KV_U_PACKETS,	0x3074 },
	[mvneta_stat_collisions] =
	    { "rx colls",	KSTAT_KV_U_PACKETS,	0x3078 },
	[mvneta_stat_late_collisions] =
	    { "rx late colls",	KSTAT_KV_U_PACKETS,	0x307c },

	[mvneta_stat_port_discard] =
	    { "rx discard",	KSTAT_KV_U_PACKETS,	MVNETA_PXDFC },
	[mvneta_stat_port_overrun] =
	    { "rx overrun",	KSTAT_KV_U_PACKETS,	MVNETA_POFC },
};

/* The table must define exactly one entry per enum mvneta_stat value. */
CTASSERT(nitems(mvneta_counters) == mvnet_stat_count);
1892 
/*
 * kstat read callback: fold the current hardware counter values into
 * the kstat data.  Values are accumulated with +=, not assigned —
 * NOTE(review): this implies the hardware counters clear on read;
 * confirm against the NETA datasheet.
 */
int
mvneta_kstat_read(struct kstat *ks)
{
	struct mvneta_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	unsigned int i;
	uint32_t hi, lo;

	for (i = 0; i < nitems(mvneta_counters); i++) {
		const struct mvneta_counter *c = &mvneta_counters[i];
		/* reg == 0 marks the 64-bit counters handled below. */
		if (c->reg == 0)
			continue;

		kstat_kv_u64(&kvs[i]) += (uint64_t)MVNETA_READ(sc, c->reg);
	}

	/* handle the exceptions */

	/* Good octets received: 64-bit counter at 0x3000 (lo) / 0x3004 (hi). */
	lo = MVNETA_READ(sc, 0x3000);
	hi = MVNETA_READ(sc, 0x3004);
	kstat_kv_u64(&kvs[mvneta_stat_good_octets_received]) +=
	    (uint64_t)hi << 32 | (uint64_t)lo;

	/* Good octets sent: 64-bit counter at 0x3038 (lo) / 0x303c (hi). */
	lo = MVNETA_READ(sc, 0x3038);
	hi = MVNETA_READ(sc, 0x303c);
	kstat_kv_u64(&kvs[mvneta_stat_good_octets_sent]) +=
	    (uint64_t)hi << 32 | (uint64_t)lo;

	nanouptime(&ks->ks_updated);

	return (0);
}
1925 
1926 void
1927 mvneta_kstat_tick(void *arg)
1928 {
1929 	struct mvneta_softc *sc = arg;
1930 
1931 	timeout_add_sec(&sc->sc_kstat_tick, 37);
1932 
1933 	if (mtx_enter_try(&sc->sc_kstat_lock)) {
1934 		mvneta_kstat_read(sc->sc_kstat);
1935 		mtx_leave(&sc->sc_kstat_lock);
1936 	}
1937 }
1938 
/*
 * Create and install the per-device "mvneta-stats" kstat: allocate one
 * kstat_kv per entry of mvneta_counters[], wire up the read callback
 * and mutex, and start the periodic polling timeout.  Silently does
 * nothing if the kstat cannot be created.
 */
void
mvneta_kstat_attach(struct mvneta_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	unsigned int i;

	mtx_init(&sc->sc_kstat_lock, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tick, mvneta_kstat_tick, sc);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "mvneta-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	/* One 64-bit counter per table entry, indexed by enum mvneta_stat. */
	kvs = mallocarray(nitems(mvneta_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK|M_ZERO);
	for (i = 0; i < nitems(mvneta_counters); i++) {
		const struct mvneta_counter *c = &mvneta_counters[i];
		kstat_kv_unit_init(&kvs[i], c->name,
		    KSTAT_KV_T_COUNTER64, c->unit);
	}

	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(mvneta_counters) * sizeof(*kvs);
	ks->ks_read = mvneta_kstat_read;
	kstat_set_mutex(ks, &sc->sc_kstat_lock);

	kstat_install(ks);

	sc->sc_kstat = ks;

	/* Poll periodically so the accumulating counters never wrap unseen. */
	timeout_add_sec(&sc->sc_kstat_tick, 37);
}
1974 
1975 #endif
1976