xref: /openbsd-src/sys/dev/fdt/if_mvneta.c (revision 25c4e8bd056e974b28f4a0ffd39d76c190a56013)
1 /*	$OpenBSD: if_mvneta.c,v 1.27 2022/07/19 21:49:22 jmatthew Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 #include "kstat.h"
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/systm.h>
35 #include <sys/endian.h>
36 #include <sys/errno.h>
37 #include <sys/kernel.h>
38 #include <sys/mutex.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <uvm/uvm_extern.h>
42 #include <sys/mbuf.h>
43 #include <sys/kstat.h>
44 
45 #include <machine/bus.h>
46 #include <machine/cpufunc.h>
47 #include <machine/fdt.h>
48 
49 #include <dev/ofw/openfirm.h>
50 #include <dev/ofw/ofw_clock.h>
51 #include <dev/ofw/ofw_misc.h>
52 #include <dev/ofw/ofw_pinctrl.h>
53 #include <dev/ofw/fdt.h>
54 
55 #include <dev/fdt/if_mvnetareg.h>
56 
57 #ifdef __armv7__
58 #include <armv7/marvell/mvmbusvar.h>
59 #endif
60 
61 #include <net/if.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 
65 #include <net/bpf.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/if_ether.h>
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #if NBPFILTER > 0
74 #include <net/bpf.h>
75 #endif
76 
77 #ifdef MVNETA_DEBUG
78 #define DPRINTF(x)	if (mvneta_debug) printf x
79 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
80 int mvneta_debug = MVNETA_DEBUG;
81 #else
82 #define DPRINTF(x)
83 #define DPRINTFN(n,x)
84 #endif
85 
86 #define MVNETA_READ(sc, reg) \
87 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
88 #define MVNETA_WRITE(sc, reg, val) \
89 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
90 #define MVNETA_READ_FILTER(sc, reg, val, c) \
91 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
92 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
93 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
94 
95 #define MVNETA_LINKUP_READ(sc) \
96 	MVNETA_READ(sc, MVNETA_PS0)
97 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
98 
/*
 * Ring geometry.  Counts are powers of two so that index wrap-around
 * is a cheap bitwise AND with the mask.
 */
#define MVNETA_TX_RING_CNT	256
#define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
#define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
#define MVNETA_TX_QUEUE_CNT	1
#define MVNETA_RX_RING_CNT	256
#define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
#define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
#define MVNETA_RX_QUEUE_CNT	1

/* Fail the build if the masked "next" does not match modular arithmetic. */
CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);

/* Maximum number of DMA segments accepted for a single transmit packet. */
#define MVNETA_NTXSEG		30
114 
/* A single chunk of DMA-able memory (used for descriptor rings). */
struct mvneta_dmamem {
	bus_dmamap_t		mdm_map;	/* map for the loaded memory */
	bus_dma_segment_t	mdm_seg;	/* single backing segment */
	size_t			mdm_size;	/* allocation size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual mapping */
};
/* Accessors: DMA map, length, device address, and kernel address. */
#define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)

/* Per-descriptor software state: the DMA map and the mbuf it carries. */
struct mvneta_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};
130 
/* Per-port driver state. */
struct mvneta_softc {
	struct device sc_dev;		/* generic autoconf device */
	struct mii_bus *sc_mdio;	/* MDIO bus our PHY hangs off */

	bus_space_tag_t sc_iot;		/* register space tag */
	bus_space_handle_t sc_ioh;	/* mapped device registers */
	bus_dma_tag_t sc_dmat;		/* DMA tag for rings and buffers */
	void *sc_ih;			/* interrupt handle */

	uint64_t		sc_clk_freq;	/* core clock frequency */

	struct arpcom sc_ac;		/* ethernet common state */
#define sc_enaddr sc_ac.ac_enaddr
	struct mii_data sc_mii;		/* MII/media state */
#define sc_media sc_mii.mii_media

	struct timeout sc_tick_ch;	/* periodic mii_tick() timeout */

	/* Transmit ring state. */
	struct mvneta_dmamem	*sc_txring;
	struct mvneta_buf	*sc_txbuf;
	struct mvneta_tx_desc	*sc_txdesc;
	unsigned int		 sc_tx_prod;	/* next free tx desc */
	unsigned int		 sc_tx_cons;	/* first tx desc sent */

	/* Receive ring state. */
	struct mvneta_dmamem	*sc_rxring;
	struct mvneta_buf	*sc_rxbuf;
	struct mvneta_rx_desc	*sc_rxdesc;
	unsigned int		 sc_rx_prod;	/* next rx desc to fill */
	unsigned int		 sc_rx_cons;	/* next rx desc recvd */
	struct if_rxring	 sc_rx_ring;

	/* PHY connection type, parsed from the "phy-mode" FDT property. */
	enum {
		PHY_MODE_QSGMII,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_1000BASEX,
		PHY_MODE_2500BASEX,
	}			 sc_phy_mode;
	int			 sc_fixed_link;		/* no PHY to manage */
	int			 sc_inband_status;	/* link state in-band */
	int			 sc_phy;	/* FDT phandle of the PHY */
	int			 sc_phyloc;	/* PHY address on MDIO bus */
	int			 sc_link;	/* cached link-up state */
	int			 sc_sfp;	/* FDT phandle of SFP cage */
	int			 sc_node;	/* our FDT node */

#if NKSTAT > 0
	struct mutex		 sc_kstat_lock;	/* protects kstat counters */
	struct timeout		 sc_kstat_tick;
	struct kstat		*sc_kstat;
#endif
};
184 
185 
186 int mvneta_miibus_readreg(struct device *, int, int);
187 void mvneta_miibus_writereg(struct device *, int, int, int);
188 void mvneta_miibus_statchg(struct device *);
189 
190 void mvneta_wininit(struct mvneta_softc *);
191 
192 /* Gigabit Ethernet Port part functions */
193 int mvneta_match(struct device *, void *, void *);
194 void mvneta_attach(struct device *, struct device *, void *);
195 void mvneta_attach_deferred(struct device *);
196 
197 void mvneta_tick(void *);
198 int mvneta_intr(void *);
199 
200 void mvneta_start(struct ifqueue *);
201 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
202 void mvneta_inband_statchg(struct mvneta_softc *);
203 void mvneta_port_change(struct mvneta_softc *);
204 void mvneta_port_up(struct mvneta_softc *);
205 int mvneta_up(struct mvneta_softc *);
206 void mvneta_down(struct mvneta_softc *);
207 void mvneta_watchdog(struct ifnet *);
208 
209 int mvneta_mediachange(struct ifnet *);
210 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
211 
212 void mvneta_rx_proc(struct mvneta_softc *);
213 void mvneta_tx_proc(struct mvneta_softc *);
214 uint8_t mvneta_crc8(const uint8_t *, size_t);
215 void mvneta_iff(struct mvneta_softc *);
216 
217 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
218     bus_size_t, bus_size_t);
219 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
220 void mvneta_fill_rx_ring(struct mvneta_softc *);
221 
222 #if NKSTAT > 0
223 void		mvneta_kstat_attach(struct mvneta_softc *);
224 #endif
225 
/* Serializes SFP EEPROM (SIOCGIFSFFPAGE) access across all mvneta ports. */
static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");

/* Autoconf glue. */
struct cfdriver mvneta_cd = {
	NULL, "mvneta", DV_IFNET
};

const struct cfattach mvneta_ca = {
	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
};
235 
236 int
237 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
238 {
239 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
240 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
241 }
242 
243 void
244 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
245 {
246 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
247 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
248 }
249 
250 void
251 mvneta_miibus_statchg(struct device *self)
252 {
253 	struct mvneta_softc *sc = (struct mvneta_softc *)self;
254 
255 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
256 		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
257 
258 		panc &= ~(MVNETA_PANC_SETMIISPEED |
259 			  MVNETA_PANC_SETGMIISPEED |
260 			  MVNETA_PANC_SETFULLDX);
261 
262 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
263 		case IFM_1000_SX:
264 		case IFM_1000_LX:
265 		case IFM_1000_CX:
266 		case IFM_1000_T:
267 			panc |= MVNETA_PANC_SETGMIISPEED;
268 			break;
269 		case IFM_100_TX:
270 			panc |= MVNETA_PANC_SETMIISPEED;
271 			break;
272 		case IFM_10_T:
273 			break;
274 		}
275 
276 		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
277 			panc |= MVNETA_PANC_SETFULLDX;
278 
279 		MVNETA_WRITE(sc, MVNETA_PANC, panc);
280 	}
281 
282 	mvneta_port_change(sc);
283 }
284 
285 void
286 mvneta_inband_statchg(struct mvneta_softc *sc)
287 {
288 	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
289 	uint32_t reg;
290 
291 	sc->sc_mii.mii_media_status = IFM_AVALID;
292 	sc->sc_mii.mii_media_active = IFM_ETHER;
293 
294 	reg = MVNETA_READ(sc, MVNETA_PS0);
295 	if (reg & MVNETA_PS0_LINKUP)
296 		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
297 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
298 		sc->sc_mii.mii_media_active |= subtype;
299 	else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
300 		sc->sc_mii.mii_media_active |= subtype;
301 	else if (reg & MVNETA_PS0_GMIISPEED)
302 		sc->sc_mii.mii_media_active |= IFM_1000_T;
303 	else if (reg & MVNETA_PS0_MIISPEED)
304 		sc->sc_mii.mii_media_active |= IFM_100_TX;
305 	else
306 		sc->sc_mii.mii_media_active |= IFM_10_T;
307 	if (reg & MVNETA_PS0_FULLDX)
308 		sc->sc_mii.mii_media_active |= IFM_FDX;
309 
310 	mvneta_port_change(sc);
311 }
312 
313 void
314 mvneta_enaddr_write(struct mvneta_softc *sc)
315 {
316 	uint32_t maddrh, maddrl;
317 	maddrh  = sc->sc_enaddr[0] << 24;
318 	maddrh |= sc->sc_enaddr[1] << 16;
319 	maddrh |= sc->sc_enaddr[2] << 8;
320 	maddrh |= sc->sc_enaddr[3];
321 	maddrl  = sc->sc_enaddr[4] << 8;
322 	maddrl |= sc->sc_enaddr[5];
323 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
324 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
325 }
326 
/*
 * Program the controller's MBUS address decoding windows so that the
 * NIC's DMA engine can reach DRAM.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
	uint32_t en;
	int i;

#ifdef __armv7__
	/* Armada 370/XP: the MBUS DRAM layout must be known up front. */
	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);
#endif

	/* Start from a clean slate: clear every window and remap register. */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* All windows disabled; clearing a bit below enables that window. */
	en = MVNETA_BARE_EN_MASK;

#ifdef __armv7__
	/* One decoding window per DRAM chip select. */
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}
#else
	/* Single window; size 0 presumably covers the whole space — TODO confirm. */
	MVNETA_WRITE(sc, MVNETA_S(0), MVNETA_S_SIZE(0));
	en &= ~(1 << 0);
#endif

	MVNETA_WRITE(sc, MVNETA_BARE, en);
}
368 
/*
 * COMPHY (common serdes lane) setup on Armada 3700 is performed via
 * secure monitor calls; these constants encode the SMC function ID and
 * its mode/speed/unit argument fields.
 */
#define COMPHY_SIP_POWER_ON	0x82000001
#define COMPHY_SIP_POWER_OFF	0x82000002
#define COMPHY_SPEED(x)		((x) << 2)
#define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
#define  COMPHY_SPEED_2_5G		1
#define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
#define  COMPHY_SPEED_5G		3
#define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
#define  COMPHY_SPEED_6G		5
#define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
#define COMPHY_UNIT(x)		((x) << 8)
#define COMPHY_MODE(x)		((x) << 12)
#define  COMPHY_MODE_SATA		1
#define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
#define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
#define  COMPHY_MODE_USB3H		4
#define  COMPHY_MODE_USB3D		5
#define  COMPHY_MODE_PCIE		6
#define  COMPHY_MODE_RXAUI		7
#define  COMPHY_MODE_XFI		8
#define  COMPHY_MODE_SFI		9
#define  COMPHY_MODE_USB3		10
391 
392 void
393 mvneta_comphy_init(struct mvneta_softc *sc)
394 {
395 	int node, phys[2], lane, unit;
396 	uint32_t mode;
397 
398 	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
399 	    sizeof(phys))
400 		return;
401 	node = OF_getnodebyphandle(phys[0]);
402 	if (!node)
403 		return;
404 
405 	lane = OF_getpropint(node, "reg", 0);
406 	unit = phys[1];
407 
408 	switch (sc->sc_phy_mode) {
409 	case PHY_MODE_1000BASEX:
410 	case PHY_MODE_SGMII:
411 		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
412 		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
413 		    COMPHY_UNIT(unit);
414 		break;
415 	case PHY_MODE_2500BASEX:
416 		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
417 		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
418 		    COMPHY_UNIT(unit);
419 		break;
420 	default:
421 		return;
422 	}
423 
424 	smc_call(COMPHY_SIP_POWER_ON, lane, mode, 0);
425 }
426 
427 int
428 mvneta_match(struct device *parent, void *cfdata, void *aux)
429 {
430 	struct fdt_attach_args *faa = aux;
431 
432 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
433 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
434 }
435 
436 void
437 mvneta_attach(struct device *parent, struct device *self, void *aux)
438 {
439 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
440 	struct fdt_attach_args *faa = aux;
441 	uint32_t ctl0, ctl2, ctl4, panc;
442 	struct ifnet *ifp;
443 	int i, len, node;
444 	char *phy_mode;
445 	char *managed;
446 
447 	sc->sc_iot = faa->fa_iot;
448 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
449 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
450 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
451 		printf("%s: cannot map registers\n", self->dv_xname);
452 		return;
453 	}
454 	sc->sc_dmat = faa->fa_dmat;
455 	sc->sc_node = faa->fa_node;
456 
457 	clock_enable(faa->fa_node, NULL);
458 	sc->sc_clk_freq = clock_get_frequency_idx(faa->fa_node, 0);
459 
460 	pinctrl_byname(faa->fa_node, "default");
461 
462 	len = OF_getproplen(faa->fa_node, "phy-mode");
463 	if (len <= 0) {
464 		printf(": cannot extract phy-mode\n");
465 		return;
466 	}
467 
468 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
469 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
470 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
471 		sc->sc_phy_mode = PHY_MODE_QSGMII;
472 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
473 		sc->sc_phy_mode = PHY_MODE_SGMII;
474 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
475 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
476 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
477 		sc->sc_phy_mode = PHY_MODE_RGMII;
478 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
479 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
480 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
481 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
482 	else {
483 		printf(": cannot use phy-mode %s\n", phy_mode);
484 		return;
485 	}
486 	free(phy_mode, M_TEMP, len);
487 
488 	/* TODO: check child's name to be "fixed-link" */
489 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
490 	    OF_child(faa->fa_node))
491 		sc->sc_fixed_link = 1;
492 
493 	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
494 		managed = malloc(len, M_TEMP, M_WAITOK);
495 		OF_getprop(faa->fa_node, "managed", managed, len);
496 		if (!strncmp(managed, "in-band-status",
497 		    strlen("in-band-status"))) {
498 			sc->sc_fixed_link = 1;
499 			sc->sc_inband_status = 1;
500 		}
501 		free(managed, M_TEMP, len);
502 	}
503 
504 	if (!sc->sc_fixed_link) {
505 		sc->sc_phy = OF_getpropint(faa->fa_node, "phy", 0);
506 		node = OF_getnodebyphandle(sc->sc_phy);
507 		if (!node) {
508 			printf(": cannot find phy in fdt\n");
509 			return;
510 		}
511 
512 		if ((sc->sc_phyloc = OF_getpropint(node, "reg", -1)) == -1) {
513 			printf(": cannot extract phy addr\n");
514 			return;
515 		}
516 	}
517 
518 	mvneta_wininit(sc);
519 
520 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
521 	    ETHER_ADDR_LEN) {
522 		OF_getprop(faa->fa_node, "local-mac-address",
523 		    sc->sc_enaddr, ETHER_ADDR_LEN);
524 		mvneta_enaddr_write(sc);
525 	} else {
526 		uint32_t maddrh, maddrl;
527 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
528 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
529 		if (maddrh || maddrl) {
530 			sc->sc_enaddr[0] = maddrh >> 24;
531 			sc->sc_enaddr[1] = maddrh >> 16;
532 			sc->sc_enaddr[2] = maddrh >> 8;
533 			sc->sc_enaddr[3] = maddrh >> 0;
534 			sc->sc_enaddr[4] = maddrl >> 8;
535 			sc->sc_enaddr[5] = maddrl >> 0;
536 		} else
537 			ether_fakeaddr(&sc->sc_ac.ac_if);
538 	}
539 
540 	sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0);
541 
542 	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
543 
544 	/* disable port */
545 	MVNETA_WRITE(sc, MVNETA_PMACC0,
546 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
547 	delay(200);
548 
549 	/* clear all cause registers */
550 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
551 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
552 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
553 
554 	/* mask all interrupts */
555 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
556 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
557 	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
558 	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
559 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
560 
561 	/* enable MBUS Retry bit16 */
562 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
563 
564 	/* enable access for CPU0 */
565 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
566 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
567 
568 	/* reset RX and TX DMAs */
569 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
570 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
571 
572 	/* disable legacy WRR, disable EJP, release from reset */
573 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
574 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
575 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
576 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
577 	}
578 
579 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
580 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
581 
582 	/* set port acceleration mode */
583 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
584 
585 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
586 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
587 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
588 
589 	/* Set SDC register except IPGINT bits */
590 	MVNETA_WRITE(sc, MVNETA_SDC,
591 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
592 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
593 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
594 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
595 
596 	/* XXX: Disable PHY polling in hardware */
597 	MVNETA_WRITE(sc, MVNETA_EUC,
598 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
599 
600 	/* clear uni-/multicast tables */
601 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
602 	memset(dfut, 0, sizeof(dfut));
603 	memset(dfsmt, 0, sizeof(dfut));
604 	memset(dfomt, 0, sizeof(dfut));
605 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
606 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
607 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
608 
609 	MVNETA_WRITE(sc, MVNETA_PIE,
610 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
611 
612 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
613 
614 	/* Setup phy. */
615 	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
616 	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
617 	ctl4 = MVNETA_READ(sc, MVNETA_PMACC4);
618 	panc = MVNETA_READ(sc, MVNETA_PANC);
619 
620 	/* Force link down to change in-band settings. */
621 	panc &= ~MVNETA_PANC_FORCELINKPASS;
622 	panc |= MVNETA_PANC_FORCELINKFAIL;
623 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
624 
625 	mvneta_comphy_init(sc);
626 
627 	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
628 	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
629 	ctl4 &= ~(MVNETA_PMACC4_SHORT_PREAMBLE);
630 	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
631 	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
632 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
633 	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
634 	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);
635 
636 	ctl2 |= MVNETA_PMACC2_RGMIIEN;
637 	switch (sc->sc_phy_mode) {
638 	case PHY_MODE_QSGMII:
639 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
640 		    MVNETA_SERDESCFG_QSGMII_PROTO);
641 		ctl2 |= MVNETA_PMACC2_PCSEN;
642 		break;
643 	case PHY_MODE_SGMII:
644 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
645 		    MVNETA_SERDESCFG_SGMII_PROTO);
646 		ctl2 |= MVNETA_PMACC2_PCSEN;
647 		break;
648 	case PHY_MODE_1000BASEX:
649 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
650 		    MVNETA_SERDESCFG_SGMII_PROTO);
651 		ctl2 |= MVNETA_PMACC2_PCSEN;
652 		break;
653 	case PHY_MODE_2500BASEX:
654 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
655 		    MVNETA_SERDESCFG_HSGMII_PROTO);
656 		ctl2 |= MVNETA_PMACC2_PCSEN;
657 		ctl4 |= MVNETA_PMACC4_SHORT_PREAMBLE;
658 		break;
659 	default:
660 		break;
661 	}
662 
663 	/* Use Auto-Negotiation for Inband Status only */
664 	if (sc->sc_inband_status) {
665 		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
666 		    MVNETA_PANC_FORCELINKPASS);
667 		/* TODO: read mode from SFP */
668 		if (1) {
669 			/* 802.3z */
670 			ctl0 |= MVNETA_PMACC0_PORTTYPE;
671 			panc |= (MVNETA_PANC_INBANDANEN |
672 			    MVNETA_PANC_SETGMIISPEED |
673 			    MVNETA_PANC_SETFULLDX);
674 		} else {
675 			/* SGMII */
676 			ctl2 |= MVNETA_PMACC2_INBANDAN;
677 			panc |= (MVNETA_PANC_INBANDANEN |
678 			    MVNETA_PANC_ANSPEEDEN |
679 			    MVNETA_PANC_ANDUPLEXEN);
680 		}
681 		MVNETA_WRITE(sc, MVNETA_OMSCD,
682 		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
683 	} else {
684 		MVNETA_WRITE(sc, MVNETA_OMSCD,
685 		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
686 	}
687 
688 	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
689 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
690 	MVNETA_WRITE(sc, MVNETA_PMACC4, ctl4);
691 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
692 
693 	/* Port reset */
694 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
695 		;
696 
697 	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
698 	    mvneta_intr, sc, sc->sc_dev.dv_xname);
699 
700 	ifp = &sc->sc_ac.ac_if;
701 	ifp->if_softc = sc;
702 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
703 	ifp->if_xflags = IFXF_MPSAFE;
704 	ifp->if_qstart = mvneta_start;
705 	ifp->if_ioctl = mvneta_ioctl;
706 	ifp->if_watchdog = mvneta_watchdog;
707 	ifp->if_capabilities = IFCAP_VLAN_MTU;
708 
709 #if notyet
710 	/*
711 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
712 	 */
713 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
714 				IFCAP_CSUM_UDPv4;
715 
716 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
717 	/*
718 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
719 	 */
720 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
721 #endif
722 
723 	ifq_set_maxlen(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
724 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
725 
726 	/*
727 	 * Do MII setup.
728 	 */
729 	sc->sc_mii.mii_ifp = ifp;
730 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
731 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
732 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
733 
734 	ifmedia_init(&sc->sc_mii.mii_media, 0,
735 	    mvneta_mediachange, mvneta_mediastatus);
736 
737 	config_defer(self, mvneta_attach_deferred);
738 }
739 
/*
 * Deferred attach: runs after all devices have had a chance to attach,
 * so the MDIO bus our PHY sits on exists.  Attaches the PHY (or sets
 * up fixed/in-band media) and registers the ifnet with the network
 * stack.
 */
void
mvneta_attach_deferred(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (!sc->sc_fixed_link) {
		sc->sc_mdio = mii_byphandle(sc->sc_phy);
		if (sc->sc_mdio == NULL) {
			printf("%s: mdio bus not yet attached\n", self->dv_xname);
			return;
		}

		/* Probe the PHY; fall back to manual media if none found. */
		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
		    MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", self->dv_xname);
			ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	} else {
		/* Fixed link: no PHY to probe, advertise a single medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

		if (sc->sc_inband_status) {
			/* Seed media_active; the MAC reports actual state. */
			switch (sc->sc_phy_mode) {
			case PHY_MODE_1000BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
				break;
			case PHY_MODE_2500BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
				break;
			default:
				break;
			}
			mvneta_inband_statchg(sc);
		} else {
			/* No in-band status either: assume 1000baseT FDX. */
			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
			mvneta_miibus_statchg(self);
		}

		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NKSTAT > 0
	mvneta_kstat_attach(sc);
#endif
}
800 
801 void
802 mvneta_tick(void *arg)
803 {
804 	struct mvneta_softc *sc = arg;
805 	struct mii_data *mii = &sc->sc_mii;
806 	int s;
807 
808 	s = splnet();
809 	mii_tick(mii);
810 	splx(s);
811 
812 	timeout_add_sec(&sc->sc_tick_ch, 1);
813 }
814 
/*
 * Interrupt handler: reads the summary cause register and dispatches
 * miscellaneous (link), transmit-done and receive events.
 */
int
mvneta_intr(void *arg)
{
	struct mvneta_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t ic, misc;

	/* Which tx/rx/misc events are pending? */
	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);

	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
		/* Link-state processing runs under the kernel lock. */
		KERNEL_LOCK();
		misc = MVNETA_READ(sc, MVNETA_PMIC);
		MVNETA_WRITE(sc, MVNETA_PMIC, 0);	/* ack misc causes */
		if (sc->sc_inband_status && (misc &
		    (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE |
		    MVNETA_PMI_PSCSYNCCHNG))) {
			mvneta_inband_statchg(sc);
		}
		KERNEL_UNLOCK();
	}

	/* Skip ring processing while the interface is down. */
	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 1;

	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
		mvneta_tx_proc(sc);

	if (ISSET(ic, MVNETA_PRXTXTI_RBICTAPQ(0) | MVNETA_PRXTXTI_RDTAQ(0)))
		mvneta_rx_proc(sc);

	return 1;
}
848 
849 static inline int
850 mvneta_load_mbuf(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m)
851 {
852 	int error;
853 
854 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
855 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
856 	switch (error) {
857 	case EFBIG:
858 		error = m_defrag(m, M_DONTWAIT);
859 		if (error != 0)
860 			break;
861 
862 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
863 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
864 		if (error != 0)
865 			break;
866 
867 		/* FALLTHROUGH */
868 	case 0:
869 		return (0);
870 
871 	default:
872 		break;
873 	}
874 
875         return (error);
876 }
877 
878 static inline void
879 mvneta_encap(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m,
880     unsigned int prod)
881 {
882 	struct mvneta_tx_desc *txd;
883 	uint32_t cmdsts;
884 	unsigned int i;
885 
886 	cmdsts = MVNETA_TX_FIRST_DESC | MVNETA_TX_ZERO_PADDING |
887 	    MVNETA_TX_L4_CSUM_NOT;
888 #if notyet
889 	int m_csumflags;
890 	if (m_csumflags & M_CSUM_IPv4)
891 		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
892 	if (m_csumflags & M_CSUM_TCPv4)
893 		cmdsts |=
894 		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
895 	if (m_csumflags & M_CSUM_UDPv4)
896 		cmdsts |=
897 		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
898 	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
899 		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);
900 
901 		cmdsts |= MVNETA_TX_IP_NO_FRAG |
902 		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
903 	}
904 #endif
905 
906 	for (i = 0; i < map->dm_nsegs; i++) {
907 		txd = &sc->sc_txdesc[prod];
908 		txd->bytecnt = map->dm_segs[i].ds_len;
909 		txd->l4ichk = 0;
910 		txd->cmdsts = cmdsts;
911 		txd->nextdescptr = 0;
912 		txd->bufptr = map->dm_segs[i].ds_addr;
913 		txd->_padding[0] = 0;
914 		txd->_padding[1] = 0;
915 		txd->_padding[2] = 0;
916 		txd->_padding[3] = 0;
917 
918 		prod = MVNETA_TX_RING_NEXT(prod);
919 		cmdsts = 0;
920 	}
921 	txd->cmdsts |= MVNETA_TX_LAST_DESC;
922 }
923 
924 static inline void
925 mvneta_sync_txring(struct mvneta_softc *sc, int ops)
926 {
927 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
928 	    MVNETA_DMA_LEN(sc->sc_txring), ops);
929 }
930 
/*
 * Transmit start routine (MPSAFE ifq entry point): dequeue packets,
 * load them into DMA maps, fill descriptors and hand them to the
 * hardware in batches.
 */
void
mvneta_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct mvneta_softc *sc = ifp->if_softc;
	unsigned int prod, nprod, free, used = 0, nused;
	struct mbuf *m;
	bus_dmamap_t map;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc)) {
		ifq_purge(ifq);
		return;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_POSTWRITE);

	/* prod/cons free-run; their difference is the ring occupancy. */
	prod = sc->sc_tx_prod;
	free = MVNETA_TX_RING_CNT - (prod - sc->sc_tx_cons);

	for (;;) {
		/* Stop while a worst-case packet might not fit. */
		if (free < MVNETA_NTXSEG - 1) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		map = sc->sc_txbuf[prod].tb_map;
		if (mvneta_load_mbuf(sc, map, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++; /* XXX atomic */
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		mvneta_encap(sc, map, m, prod);

		if (map->dm_nsegs > 1) {
			/*
			 * Park the loaded map on the slot of the LAST
			 * descriptor of this frame (swapping maps with
			 * that slot) so tx completion can unload it when
			 * the whole frame is done.
			 */
			nprod = (prod + (map->dm_nsegs - 1)) %
			    MVNETA_TX_RING_CNT;
			sc->sc_txbuf[prod].tb_map = sc->sc_txbuf[nprod].tb_map;
			prod = nprod;
			sc->sc_txbuf[prod].tb_map = map;
		}
		sc->sc_txbuf[prod].tb_m = m;
		prod = MVNETA_TX_RING_NEXT(prod);

		free -= map->dm_nsegs;

		/*
		 * The PTXSU count field is narrow (MVNETA_PTXSU_MAX);
		 * flush pending descriptors to hardware before the
		 * batched count would overflow it.
		 */
		nused = used + map->dm_nsegs;
		if (nused > MVNETA_PTXSU_MAX) {
			mvneta_sync_txring(sc,
			    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NOWD(used));
			used = map->dm_nsegs;
		} else
			used = nused;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = prod;
	/* Hand any remaining batched descriptors to the hardware. */
	if (used)
		MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NOWD(used));
}
1007 
/*
 * Interface ioctl handler: interface up/down, media, rx-ring stats,
 * SFP EEPROM access; everything else goes to ether_ioctl().
 */
int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: only the rx filter needs redoing. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvneta_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvneta_down(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		/* Export receive-ring fill statistics. */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;
	case SIOCGIFSFFPAGE:
		/* SFP EEPROM reads are serialized across all ports. */
		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvneta_sff_lock);
		break;
	default:
		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		/* Parameters changed while running: reload the rx filter. */
		if (ifp->if_flags & IFF_RUNNING)
			mvneta_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}
1065 
1066 void
1067 mvneta_port_change(struct mvneta_softc *sc)
1068 {
1069 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
1070 		sc->sc_link = !sc->sc_link;
1071 
1072 		if (sc->sc_link) {
1073 			if (!sc->sc_inband_status) {
1074 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1075 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
1076 				panc |= MVNETA_PANC_FORCELINKPASS;
1077 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1078 			}
1079 			mvneta_port_up(sc);
1080 		} else {
1081 			if (!sc->sc_inband_status) {
1082 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1083 				panc &= ~MVNETA_PANC_FORCELINKPASS;
1084 				panc |= MVNETA_PANC_FORCELINKFAIL;
1085 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1086 			}
1087 		}
1088 	}
1089 }
1090 
/*
 * Start packet flow on queue 0 in both directions; called once the
 * link is (or is forced) up.
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
1098 
/*
 * Bring the interface up: allocate the TX/RX descriptor rings and
 * per-slot DMA maps, program the ring addresses and interrupt
 * moderation into the port, set up address filters, enable the port
 * and unmask interrupts.  Allocations use M_WAITOK, so this always
 * returns 0.
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One map per TX slot; mbufs are attached at transmit time. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* RX buffers are single-segment mbuf clusters. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	/* ring size plus buffer size (in 8-byte units) in the high bits */
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));

	if (sc->sc_clk_freq != 0) {
		/*
		 * Use the Non Occupied Descriptors Threshold to
		 * interrupt when the descriptors granted by rxr are
		 * used up, otherwise wait until the RX Interrupt
		 * Time Threshold is reached.
		 */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0),
		    MVNETA_PRXDQTH_ODT(MVNETA_RX_RING_CNT) |
		    MVNETA_PRXDQTH_NODT(2));
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), sc->sc_clk_freq / 4000);
	} else {
		/* Time based moderation is hard without a clock */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), 0);
	}

	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT) |
	    MVNETA_PTXDQS_TBT(MIN(MVNETA_TX_RING_CNT / 2, ifp->if_txmit)));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	/* keep 2 slots in reserve; hand the rest to the hardware */
	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_RDTAQ(0) |
	    MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1218 
/*
 * Bring the interface down: stop the RX/TX queues and busy-wait for
 * the hardware to drain, disable the port, mask and acknowledge all
 * interrupts, free every ring buffer and DMA map, and finally reset
 * the RX/TX DMA engines.  Counterpart of mvneta_up().
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);
	ifp->if_flags &= ~IFF_RUNNING;
	/* wait for any in-flight interrupt handler to finish */
	intr_barrier(sc->sc_ih);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		/* inner loop: poll until FIFO empty and no TX in progress */
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	/* let any residual traffic settle before disabling the port */
	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifq_clr_oactive(&ifp->if_snd);
}
1344 
1345 void
1346 mvneta_watchdog(struct ifnet *ifp)
1347 {
1348 	struct mvneta_softc *sc = ifp->if_softc;
1349 
1350 	/*
1351 	 * Reclaim first as there is a possibility of losing Tx completion
1352 	 * interrupts.
1353 	 */
1354 	mvneta_tx_proc(sc);
1355 	if (sc->sc_tx_prod != sc->sc_tx_cons) {
1356 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1357 
1358 		ifp->if_oerrors++;
1359 	}
1360 }
1361 
1362 /*
1363  * Set media options.
1364  */
1365 int
1366 mvneta_mediachange(struct ifnet *ifp)
1367 {
1368 	struct mvneta_softc *sc = ifp->if_softc;
1369 
1370 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1371 		mii_mediachg(&sc->sc_mii);
1372 
1373 	return (0);
1374 }
1375 
1376 /*
1377  * Report current media status.
1378  */
1379 void
1380 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1381 {
1382 	struct mvneta_softc *sc = ifp->if_softc;
1383 
1384 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1385 		mii_pollstat(&sc->sc_mii);
1386 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1387 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1388 	}
1389 
1390 	if (sc->sc_fixed_link) {
1391 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1392 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1393 	}
1394 }
1395 
/*
 * Receive interrupt processing: walk the descriptors the hardware has
 * filled, hand good frames to the network stack, count errors, and
 * tell the hardware how many descriptors were consumed.  The occupied
 * descriptor count register can only be decremented by at most
 * MVNETA_PRXSU_MAX per write, hence the mid-loop flush.
 */
void
mvneta_rx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t rxstat;
	unsigned int i, done, cons;

	/* how many descriptors the hardware has completed */
	done = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTREAD);

	cons = sc->sc_rx_cons;

	for (i = 0; i < done; i++) {
		rxd = &sc->sc_rxdesc[cons];
		rxb = &sc->sc_rxbuf[cons];

		/* detach the mbuf from the ring slot */
		m = rxb->tb_m;
		rxb->tb_m = NULL;

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		rxstat = rxd->cmdsts;
		if (rxstat & MVNETA_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;

			if (err == MVNETA_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			m_freem(m);
		} else {
			/* strip the 2-byte Marvell hardware header */
			m->m_pkthdr.len = m->m_len = rxd->bytecnt;
			m_adj(m, MVNETA_HWHEADER_SIZE);

			ml_enqueue(&ml, m);
		}

#if notyet
		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
			int flgs = 0;

			/* Check IPv4 header checksum */
			flgs |= M_CSUM_IPv4;
			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
				flgs |= M_CSUM_IPv4_BAD;
			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
				/*
				 * Check TCPv4/UDPv4 checksum for
				 * non-fragmented packet only.
				 *
				 * It seemed that sometimes
				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
				 * even if the checksum is correct and the
				 * packet was not fragmented. So we don't set
				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
				 */

				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_TCP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_TCPv4;
				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_UDP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_UDPv4;
			}
			m->m_pkthdr.csum_flags = flgs;
		}
#endif

		if_rxr_put(&sc->sc_rx_ring, 1);

		cons = MVNETA_RX_RING_NEXT(cons);

		/* flush a full batch to the status update register */
		if (i == MVNETA_PRXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PRXSU(0),
			    MVNETA_PRXSU_NOPD(MVNETA_PRXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PRXSU_MAX;
			i = 0;
		}
	}

	sc->sc_rx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREREAD);

	/* report the remaining processed descriptors */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOPD(i));
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	mvneta_fill_rx_ring(sc);
}
1513 
/*
 * Transmit completion processing: reclaim descriptors the hardware
 * has sent, unload and free the associated mbufs, account errors, and
 * acknowledge the released buffers to the hardware (at most
 * MVNETA_PTXSU_MAX per register write, hence the mid-loop flush).
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ifqueue *ifq = &ifp->if_snd;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	unsigned int i, cons, done;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/* number of transmitted buffers reported by the port */
	done = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	cons = sc->sc_tx_cons;

	for (i = 0; i < done; i++) {
		txd = &sc->sc_txdesc[cons];
		txb = &sc->sc_txbuf[cons];

		/* only the slot holding the packet's last segment has tb_m */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		cons = MVNETA_TX_RING_NEXT(cons);

		/* flush a full batch to the status update register */
		if (i == MVNETA_PTXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NORB(MVNETA_PTXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PTXSU_MAX;
			i = 0;
		}
	}

	sc->sc_tx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_PREREAD);

	/* acknowledge any remaining released buffers */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PTXSU(0),
		    MVNETA_PTXSU_NORB(i));
	}
	/* descriptors freed up: restart a stalled transmit queue */
	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);
}
1585 
1586 uint8_t
1587 mvneta_crc8(const uint8_t *data, size_t size)
1588 {
1589 	int bit;
1590 	uint8_t byte;
1591 	uint8_t crc = 0;
1592 	const uint8_t poly = 0x07;
1593 
1594 	while(size--)
1595 	  for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
1596 	    crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
1597 
1598 	return crc;
1599 }
1600 
1601 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1602 
/*
 * Program the receive filters: promiscuous/allmulti mode bits in the
 * port configuration register, the unicast filter table (keyed on the
 * low nibble of our MAC address), the "special" multicast table for
 * 01:00:5e:00:00:xx addresses (keyed on the last byte), and the
 * "other" multicast table (keyed on a CRC-8 of the full address).
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	/* start from a clean slate: no broadcast reject, no promisc */
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* ranges can't be expressed in the tables: accept all */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1662 
/*
 * Allocate `size` bytes of zeroed, DMA-loaded, kernel-mapped memory
 * in a single segment with the requested alignment.  On failure each
 * acquired resource is released in reverse order via the goto chain
 * and NULL is returned.  Freed with mvneta_dmamem_free().
 */
struct mvneta_dmamem *
mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvneta_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
1703 
/*
 * Release everything mvneta_dmamem_alloc() acquired, in reverse
 * order: kernel mapping, DMA segment, map, and the descriptor itself.
 */
void
mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
1712 
1713 static inline struct mbuf *
1714 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1715 {
1716 	struct mbuf *m = NULL;
1717 
1718 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1719 	if (m == NULL)
1720 		return (NULL);
1721 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1722 
1723 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1724 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1725 		m_freem(m);
1726 		return (NULL);
1727 	}
1728 
1729 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1730 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1731 
1732 	return (m);
1733 }
1734 
/*
 * Refill the RX ring with fresh mbuf clusters, limited by the rxr
 * accounting, and tell the hardware how many new descriptors it now
 * owns.  Refills at most MVNETA_PRXSU_MAX slots per call, the maximum
 * the status update register can accept in one write.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	unsigned int slots, used = 0;
	unsigned int prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_rx_prod;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_PRXSU_MAX);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* fresh descriptor: only the buffer address is set */
		rxd = &sc->sc_rxdesc[prod];
		rxd->cmdsts = 0;
		rxd->bufsize = 0;
		rxd->bytecnt = 0;
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->nextdescptr = 0;
		rxd->_padding[0] = 0;
		rxd->_padding[1] = 0;
		rxd->_padding[2] = 0;
		rxd->_padding[3] = 0;

		prod = MVNETA_RX_RING_NEXT(prod);
		used++;
	}
	/* return the slots we could not fill */
	if_rxr_put(&sc->sc_rx_ring, slots);

	sc->sc_rx_prod = prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREWRITE);

	/* publish the new descriptors to the hardware */
	if (used > 0)
		MVNETA_WRITE(sc, MVNETA_PRXSU(0), MVNETA_PRXSU_NOND(used));
}
1779 
1780 #if NKSTAT > 0
1781 
1782 /* this is used to sort and look up the array of kstats quickly */
1783 enum mvneta_stat {
1784 	mvneta_stat_good_octets_received,
1785 	mvneta_stat_bad_octets_received,
1786 	mvneta_stat_good_frames_received,
1787 	mvneta_stat_mac_trans_error,
1788 	mvneta_stat_bad_frames_received,
1789 	mvneta_stat_broadcast_frames_received,
1790 	mvneta_stat_multicast_frames_received,
1791 	mvneta_stat_frames_64_octets,
1792 	mvneta_stat_frames_65_to_127_octets,
1793 	mvneta_stat_frames_128_to_255_octets,
1794 	mvneta_stat_frames_256_to_511_octets,
1795 	mvneta_stat_frames_512_to_1023_octets,
1796 	mvneta_stat_frames_1024_to_max_octets,
1797 	mvneta_stat_good_octets_sent,
1798 	mvneta_stat_good_frames_sent,
1799 	mvneta_stat_excessive_collision,
1800 	mvneta_stat_multicast_frames_sent,
1801 	mvneta_stat_broadcast_frames_sent,
1802 	mvneta_stat_unrecog_mac_control_received,
1803 	mvneta_stat_good_fc_received,
1804 	mvneta_stat_bad_fc_received,
1805 	mvneta_stat_undersize,
1806 	mvneta_stat_fc_sent,
1807 	mvneta_stat_fragments,
1808 	mvneta_stat_oversize,
1809 	mvneta_stat_jabber,
1810 	mvneta_stat_mac_rcv_error,
1811 	mvneta_stat_bad_crc,
1812 	mvneta_stat_collisions,
1813 	mvneta_stat_late_collisions,
1814 
1815 	mvneta_stat_port_discard,
1816 	mvneta_stat_port_overrun,
1817 
1818 	mvnet_stat_count
1819 };
1820 
/* one kstat counter: display name, unit, and its register offset */
struct mvneta_counter {
	const char		 *name;		/* kstat key name */
	enum kstat_kv_unit	 unit;		/* bytes/packets/none */
	bus_size_t		 reg;		/* register offset; 0 = 64-bit special case */
};
1826 
/*
 * Counter table, indexed by enum mvneta_stat.  The two entries with
 * reg == 0x0 are 64-bit counters split across register pairs and are
 * handled explicitly in mvneta_kstat_read().
 */
static const struct mvneta_counter mvneta_counters[] = {
	[mvneta_stat_good_octets_received] =
	    { "rx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_bad_octets_received] =
	    { "rx bad",		KSTAT_KV_U_BYTES,	0x3008 },
	[mvneta_stat_good_frames_received] =
	    { "rx good",	KSTAT_KV_U_PACKETS,	0x3010 },
	[mvneta_stat_mac_trans_error] =
	    { "tx mac error",	KSTAT_KV_U_PACKETS,	0x300c },
	[mvneta_stat_bad_frames_received] =
	    { "rx bad",		KSTAT_KV_U_PACKETS,	0x3014 },
	[mvneta_stat_broadcast_frames_received] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS,	0x3018 },
	[mvneta_stat_multicast_frames_received] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS,	0x301c },
	[mvneta_stat_frames_64_octets] =
	    { "64B",		KSTAT_KV_U_PACKETS,	0x3020 },
	[mvneta_stat_frames_65_to_127_octets] =
	    { "65-127B",	KSTAT_KV_U_PACKETS,	0x3024 },
	[mvneta_stat_frames_128_to_255_octets] =
	    { "128-255B",	KSTAT_KV_U_PACKETS,	0x3028 },
	[mvneta_stat_frames_256_to_511_octets] =
	    { "256-511B",	KSTAT_KV_U_PACKETS,	0x302c },
	[mvneta_stat_frames_512_to_1023_octets] =
	    { "512-1023B",	KSTAT_KV_U_PACKETS,	0x3030 },
	[mvneta_stat_frames_1024_to_max_octets] =
	    { "1024-maxB",	KSTAT_KV_U_PACKETS,	0x3034 },
	[mvneta_stat_good_octets_sent] =
	    { "tx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_good_frames_sent] =
	    { "tx good",	KSTAT_KV_U_PACKETS,	0x3040 },
	[mvneta_stat_excessive_collision] =
	    { "tx excess coll",	KSTAT_KV_U_PACKETS,	0x3044 },
	[mvneta_stat_multicast_frames_sent] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS,	0x3048 },
	[mvneta_stat_broadcast_frames_sent] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS,	0x304c },
	[mvneta_stat_unrecog_mac_control_received] =
	    { "rx unknown fc",	KSTAT_KV_U_PACKETS,	0x3050 },
	[mvneta_stat_good_fc_received] =
	    { "rx fc good",	KSTAT_KV_U_PACKETS,	0x3058 },
	[mvneta_stat_bad_fc_received] =
	    { "rx fc bad",	KSTAT_KV_U_PACKETS,	0x305c },
	[mvneta_stat_undersize] =
	    { "rx undersize",	KSTAT_KV_U_PACKETS,	0x3060 },
	[mvneta_stat_fc_sent] =
	    { "tx fc",		KSTAT_KV_U_PACKETS,	0x3054 },
	[mvneta_stat_fragments] =
	    { "rx fragments",	KSTAT_KV_U_NONE,	0x3064 },
	[mvneta_stat_oversize] =
	    { "rx oversize",	KSTAT_KV_U_PACKETS,	0x3068 },
	[mvneta_stat_jabber] =
	    { "rx jabber",	KSTAT_KV_U_PACKETS,	0x306c },
	[mvneta_stat_mac_rcv_error] =
	    { "rx mac errors",	KSTAT_KV_U_PACKETS,	0x3070 },
	[mvneta_stat_bad_crc] =
	    { "rx bad crc",	KSTAT_KV_U_PACKETS,	0x3074 },
	[mvneta_stat_collisions] =
	    { "rx colls",	KSTAT_KV_U_PACKETS,	0x3078 },
	[mvneta_stat_late_collisions] =
	    { "rx late colls",	KSTAT_KV_U_PACKETS,	0x307c },

	[mvneta_stat_port_discard] =
	    { "rx discard",	KSTAT_KV_U_PACKETS,	MVNETA_PXDFC },
	[mvneta_stat_port_overrun] =
	    { "rx overrun",	KSTAT_KV_U_PACKETS,	MVNETA_POFC },
};
1894 
1895 CTASSERT(nitems(mvneta_counters) == mvnet_stat_count);
1896 
1897 int
1898 mvneta_kstat_read(struct kstat *ks)
1899 {
1900 	struct mvneta_softc *sc = ks->ks_softc;
1901 	struct kstat_kv *kvs = ks->ks_data;
1902 	unsigned int i;
1903 	uint32_t hi, lo;
1904 
1905 	for (i = 0; i < nitems(mvneta_counters); i++) {
1906 		const struct mvneta_counter *c = &mvneta_counters[i];
1907 		if (c->reg == 0)
1908 			continue;
1909 
1910 		kstat_kv_u64(&kvs[i]) += (uint64_t)MVNETA_READ(sc, c->reg);
1911 	}
1912 
1913 	/* handle the exceptions */
1914 
1915 	lo = MVNETA_READ(sc, 0x3000);
1916 	hi = MVNETA_READ(sc, 0x3004);
1917 	kstat_kv_u64(&kvs[mvneta_stat_good_octets_received]) +=
1918 	    (uint64_t)hi << 32 | (uint64_t)lo;
1919 
1920 	lo = MVNETA_READ(sc, 0x3038);
1921 	hi = MVNETA_READ(sc, 0x303c);
1922 	kstat_kv_u64(&kvs[mvneta_stat_good_octets_sent]) +=
1923 	    (uint64_t)hi << 32 | (uint64_t)lo;
1924 
1925 	nanouptime(&ks->ks_updated);
1926 
1927 	return (0);
1928 }
1929 
1930 void
1931 mvneta_kstat_tick(void *arg)
1932 {
1933 	struct mvneta_softc *sc = arg;
1934 
1935 	timeout_add_sec(&sc->sc_kstat_tick, 37);
1936 
1937 	if (mtx_enter_try(&sc->sc_kstat_lock)) {
1938 		mvneta_kstat_read(sc->sc_kstat);
1939 		mtx_leave(&sc->sc_kstat_lock);
1940 	}
1941 }
1942 
/*
 * Create and install the per-device "mvneta-stats" kstat, one
 * counter64 entry per mvneta_counters[] row, and start the periodic
 * sampling timeout.  Silently does nothing if kstat creation fails.
 */
void
mvneta_kstat_attach(struct mvneta_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	unsigned int i;

	mtx_init(&sc->sc_kstat_lock, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tick, mvneta_kstat_tick, sc);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "mvneta-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(mvneta_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK|M_ZERO);
	for (i = 0; i < nitems(mvneta_counters); i++) {
		const struct mvneta_counter *c = &mvneta_counters[i];
		kstat_kv_unit_init(&kvs[i], c->name,
		    KSTAT_KV_T_COUNTER64, c->unit);
	}

	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(mvneta_counters) * sizeof(*kvs);
	ks->ks_read = mvneta_kstat_read;
	kstat_set_mutex(ks, &sc->sc_kstat_lock);

	kstat_install(ks);

	sc->sc_kstat = ks;

	/* 37s period: deliberately coprime with common sampling intervals */
	timeout_add_sec(&sc->sc_kstat_tick, 37);
}
1978 
1979 #endif
1980