xref: /openbsd-src/sys/dev/fdt/if_mvneta.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*	$OpenBSD: if_mvneta.c,v 1.16 2020/12/12 11:48:52 jan Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 
31 #include <sys/param.h>
32 #include <sys/device.h>
33 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/errno.h>
36 #include <sys/kernel.h>
37 #include <sys/mutex.h>
38 #include <sys/socket.h>
39 #include <sys/sockio.h>
40 #include <uvm/uvm_extern.h>
41 #include <sys/mbuf.h>
42 
43 #include <machine/bus.h>
44 #include <machine/cpufunc.h>
45 #include <machine/fdt.h>
46 
47 #include <dev/ofw/openfirm.h>
48 #include <dev/ofw/ofw_clock.h>
49 #include <dev/ofw/ofw_misc.h>
50 #include <dev/ofw/ofw_pinctrl.h>
51 #include <dev/ofw/fdt.h>
52 
53 #include <dev/fdt/if_mvnetareg.h>
54 
55 #ifdef __armv7__
56 #include <armv7/marvell/mvmbusvar.h>
57 #endif
58 
59 #include <net/if.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 
63 #include <net/bpf.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/if_ether.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
/* Compile-time debug printing; enabled by building with MVNETA_DEBUG set. */
#ifdef MVNETA_DEBUG
#define DPRINTF(x)	if (mvneta_debug) printf x
#define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
int mvneta_debug = MVNETA_DEBUG;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* 32-bit register accessors and multi-word filter-table accessors. */
#define MVNETA_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVNETA_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVNETA_READ_FILTER(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVNETA_WRITE_FILTER(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

/* Link state is read live from the port status 0 register. */
#define MVNETA_LINKUP_READ(sc) \
	MVNETA_READ(sc, MVNETA_PS0)
#define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)

/* Ring sizes must be powers of two: _NEXT() wraps by masking. */
#define MVNETA_TX_RING_CNT	256
#define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
#define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
#define MVNETA_TX_QUEUE_CNT	1
#define MVNETA_RX_RING_CNT	256
#define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
#define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
#define MVNETA_RX_QUEUE_CNT	1

/* Compile-time check that the mask-based wrap equals a modulo increment. */
CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);

/* Maximum number of DMA segments accepted for one tx mbuf chain. */
#define MVNETA_NTXSEG		30
112 
/*
 * One contiguous bus_dma allocation (used for descriptor rings):
 * the DMA map, its single backing segment, the size and the KVA mapping.
 */
struct mvneta_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* Accessors: DMA map, byte length, device (bus) address, kernel address. */
#define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)

/* Per-descriptor software state: the DMA map plus the mbuf it carries. */
struct mvneta_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};
128 
/*
 * Per-port softc.  A single tx and a single rx queue (queue 0) are
 * used; each descriptor ring lives in one mvneta_dmamem allocation
 * with a parallel mvneta_buf array holding the mbufs and DMA maps.
 */
struct mvneta_softc {
	struct device sc_dev;
	struct mii_bus *sc_mdio;	/* MDIO controller; resolved in deferred attach */

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t sc_dmat;

	struct arpcom sc_ac;
#define sc_enaddr sc_ac.ac_enaddr
	struct mii_data sc_mii;
#define sc_media sc_mii.mii_media

	struct timeout sc_tick_ch;	/* 1 Hz mii_tick timeout */

	struct mvneta_dmamem	*sc_txring;
	struct mvneta_buf	*sc_txbuf;
	struct mvneta_tx_desc	*sc_txdesc;
	int			 sc_tx_prod;	/* next free tx desc */
	int			 sc_tx_cnt;	/* amount of tx sent */
	int			 sc_tx_cons;	/* first tx desc sent */

	struct mvneta_dmamem	*sc_rxring;
	struct mvneta_buf	*sc_rxbuf;
	struct mvneta_rx_desc	*sc_rxdesc;
	int			 sc_rx_prod;	/* next rx desc to fill */
	struct if_rxring	 sc_rx_ring;
	int			 sc_rx_cons;	/* next rx desc recvd */

	/* Connection to the PHY, parsed from the "phy-mode" FDT property. */
	enum {
		PHY_MODE_QSGMII,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_1000BASEX,
		PHY_MODE_2500BASEX,
	}			 sc_phy_mode;
	int			 sc_fixed_link;		/* no MII PHY to poll */
	int			 sc_inband_status;	/* in-band (SGMII/802.3z) status */
	int			 sc_phy;	/* FDT phandle of the PHY */
	int			 sc_phyloc;	/* PHY address on the MDIO bus */
	int			 sc_link;	/* cached link-up state */
	int			 sc_sfp;	/* FDT phandle of the SFP cage */
	int			 sc_node;	/* our own FDT node */
};
174 
175 
/* MII bus glue. */
int mvneta_miibus_readreg(struct device *, int, int);
void mvneta_miibus_writereg(struct device *, int, int, int);
void mvneta_miibus_statchg(struct device *);

/* MBUS address-decoding window setup. */
void mvneta_wininit(struct mvneta_softc *);

/* Gigabit Ethernet Port part functions */
int mvneta_match(struct device *, void *, void *);
void mvneta_attach(struct device *, struct device *, void *);
void mvneta_attach_deferred(struct device *);

void mvneta_tick(void *);
int mvneta_intr(void *);

void mvneta_start(struct ifnet *);
int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
void mvneta_inband_statchg(struct mvneta_softc *);
void mvneta_port_change(struct mvneta_softc *);
void mvneta_port_up(struct mvneta_softc *);
int mvneta_up(struct mvneta_softc *);
void mvneta_down(struct mvneta_softc *);
void mvneta_watchdog(struct ifnet *);

int mvneta_mediachange(struct ifnet *);
void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);

int mvneta_encap(struct mvneta_softc *, struct mbuf *, uint32_t *);
void mvneta_rx_proc(struct mvneta_softc *);
void mvneta_tx_proc(struct mvneta_softc *);
uint8_t mvneta_crc8(const uint8_t *, size_t);
void mvneta_iff(struct mvneta_softc *);

/* Descriptor-ring DMA memory helpers. */
struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
    bus_size_t, bus_size_t);
void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
void mvneta_fill_rx_ring(struct mvneta_softc *);

/* Serializes SIOCGIFSFFPAGE across all mvneta instances. */
static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");

struct cfdriver mvneta_cd = {
	NULL, "mvneta", DV_IFNET
};

struct cfattach mvneta_ca = {
	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
};
222 
223 int
224 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
225 {
226 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
227 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
228 }
229 
230 void
231 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
232 {
233 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
234 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
235 }
236 
237 void
238 mvneta_miibus_statchg(struct device *self)
239 {
240 	struct mvneta_softc *sc = (struct mvneta_softc *)self;
241 
242 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
243 		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
244 
245 		panc &= ~(MVNETA_PANC_SETMIISPEED |
246 			  MVNETA_PANC_SETGMIISPEED |
247 			  MVNETA_PANC_SETFULLDX);
248 
249 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
250 		case IFM_1000_SX:
251 		case IFM_1000_LX:
252 		case IFM_1000_CX:
253 		case IFM_1000_T:
254 			panc |= MVNETA_PANC_SETGMIISPEED;
255 			break;
256 		case IFM_100_TX:
257 			panc |= MVNETA_PANC_SETMIISPEED;
258 			break;
259 		case IFM_10_T:
260 			break;
261 		}
262 
263 		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
264 			panc |= MVNETA_PANC_SETFULLDX;
265 
266 		MVNETA_WRITE(sc, MVNETA_PANC, panc);
267 	}
268 
269 	mvneta_port_change(sc);
270 }
271 
/*
 * Derive link and media state from the port status 0 register when the
 * MAC uses in-band (SGMII/802.3z) status, feed it into the MII fields,
 * and run the common link-change handling.
 */
void
mvneta_inband_statchg(struct mvneta_softc *sc)
{
	/* Remember the configured serdes subtype before resetting media. */
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	reg = MVNETA_READ(sc, MVNETA_PS0);
	if (reg & MVNETA_PS0_LINKUP)
		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
	/* Fixed serdes modes keep their subtype; otherwise decode PS0 bits. */
	if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
		sc->sc_mii.mii_media_active |= subtype;
	else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
		sc->sc_mii.mii_media_active |= subtype;
	else if (reg & MVNETA_PS0_GMIISPEED)
		sc->sc_mii.mii_media_active |= IFM_1000_T;
	else if (reg & MVNETA_PS0_MIISPEED)
		sc->sc_mii.mii_media_active |= IFM_100_TX;
	else
		sc->sc_mii.mii_media_active |= IFM_10_T;
	if (reg & MVNETA_PS0_FULLDX)
		sc->sc_mii.mii_media_active |= IFM_FDX;

	mvneta_port_change(sc);
}
299 
300 void
301 mvneta_enaddr_write(struct mvneta_softc *sc)
302 {
303 	uint32_t maddrh, maddrl;
304 	maddrh  = sc->sc_enaddr[0] << 24;
305 	maddrh |= sc->sc_enaddr[1] << 16;
306 	maddrh |= sc->sc_enaddr[2] << 8;
307 	maddrh |= sc->sc_enaddr[3];
308 	maddrl  = sc->sc_enaddr[4] << 8;
309 	maddrl |= sc->sc_enaddr[5];
310 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
311 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
312 }
313 
/*
 * Program the MBUS address-decoding windows so the NIC's DMA engine
 * can reach DRAM.  All windows are first cleared; on armv7 one window
 * is opened per DRAM chip select from the mbus info collected at boot,
 * otherwise a single catch-all window 0 is used.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
	uint32_t en;
	int i;

#ifdef __armv7__
	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);
#endif

	/* Start from a clean slate: clear every window (and remap regs). */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* BARE is a disable mask: clear a bit for each window we enable. */
	en = MVNETA_BARE_EN_MASK;

#ifdef __armv7__
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}
#else
	MVNETA_WRITE(sc, MVNETA_S(0), MVNETA_S_SIZE(0));
	en &= ~(1 << 0);
#endif

	MVNETA_WRITE(sc, MVNETA_BARE, en);
}
355 
/*
 * COMPHY (shared serdes lane) configuration is done through secure
 * monitor firmware calls; these constants encode the SMC function IDs
 * and the mode argument (speed/unit/mode bit fields).
 */
#define COMPHY_SIP_POWER_ON	0x82000001
#define COMPHY_SIP_POWER_OFF	0x82000002
#define COMPHY_SPEED(x)		((x) << 2)
#define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
#define  COMPHY_SPEED_2_5G		1
#define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
#define  COMPHY_SPEED_5G		3
#define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
#define  COMPHY_SPEED_6G		5
#define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
#define COMPHY_UNIT(x)		((x) << 8)
#define COMPHY_MODE(x)		((x) << 12)
#define  COMPHY_MODE_SATA		1
#define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
#define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
#define  COMPHY_MODE_USB3H		4
#define  COMPHY_MODE_USB3D		5
#define  COMPHY_MODE_PCIE		6
#define  COMPHY_MODE_RXAUI		7
#define  COMPHY_MODE_XFI		8
#define  COMPHY_MODE_SFI		9
#define  COMPHY_MODE_USB3		10
378 
/*
 * Power up the shared serdes lane (COMPHY) used by this port via a
 * secure-monitor call.  Quietly does nothing when the node has no
 * usable "phys" property or the phy mode is not a serdes mode handled
 * below.
 */
void
mvneta_comphy_init(struct mvneta_softc *sc)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	/* "phys" carries <phandle unit>; bail if absent or malformed. */
	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	/* The lane number is the comphy node's "reg" value. */
	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		return;
	}

	smc_call(COMPHY_SIP_POWER_ON, lane, mode, 0);
}
413 
414 int
415 mvneta_match(struct device *parent, void *cfdata, void *aux)
416 {
417 	struct fdt_attach_args *faa = aux;
418 
419 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
420 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
421 }
422 
423 void
424 mvneta_attach(struct device *parent, struct device *self, void *aux)
425 {
426 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
427 	struct fdt_attach_args *faa = aux;
428 	uint32_t ctl0, ctl2, ctl4, panc;
429 	struct ifnet *ifp;
430 	int i, len, node;
431 	char *phy_mode;
432 	char *managed;
433 
434 	printf("\n");
435 
436 	sc->sc_iot = faa->fa_iot;
437 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
438 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
439 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
440 		printf("%s: cannot map registers\n", self->dv_xname);
441 		return;
442 	}
443 	sc->sc_dmat = faa->fa_dmat;
444 	sc->sc_node = faa->fa_node;
445 
446 	clock_enable(faa->fa_node, NULL);
447 
448 	pinctrl_byname(faa->fa_node, "default");
449 
450 	len = OF_getproplen(faa->fa_node, "phy-mode");
451 	if (len <= 0) {
452 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
453 		return;
454 	}
455 
456 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
457 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
458 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
459 		sc->sc_phy_mode = PHY_MODE_QSGMII;
460 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
461 		sc->sc_phy_mode = PHY_MODE_SGMII;
462 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
463 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
464 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
465 		sc->sc_phy_mode = PHY_MODE_RGMII;
466 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
467 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
468 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
469 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
470 	else {
471 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
472 		    phy_mode);
473 		return;
474 	}
475 	free(phy_mode, M_TEMP, len);
476 
477 	/* TODO: check child's name to be "fixed-link" */
478 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
479 	    OF_child(faa->fa_node))
480 		sc->sc_fixed_link = 1;
481 
482 	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
483 		managed = malloc(len, M_TEMP, M_WAITOK);
484 		OF_getprop(faa->fa_node, "managed", managed, len);
485 		if (!strncmp(managed, "in-band-status",
486 		    strlen("in-band-status"))) {
487 			sc->sc_fixed_link = 1;
488 			sc->sc_inband_status = 1;
489 		}
490 		free(managed, M_TEMP, len);
491 	}
492 
493 	if (!sc->sc_fixed_link) {
494 		sc->sc_phy = OF_getpropint(faa->fa_node, "phy", 0);
495 		node = OF_getnodebyphandle(sc->sc_phy);
496 		if (!node) {
497 			printf("%s: cannot find phy in fdt\n", self->dv_xname);
498 			return;
499 		}
500 
501 		if ((sc->sc_phyloc = OF_getpropint(node, "reg", -1)) == -1) {
502 			printf("%s: cannot extract phy addr\n", self->dv_xname);
503 			return;
504 		}
505 	}
506 
507 	mvneta_wininit(sc);
508 
509 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
510 	    ETHER_ADDR_LEN) {
511 		OF_getprop(faa->fa_node, "local-mac-address",
512 		    sc->sc_enaddr, ETHER_ADDR_LEN);
513 		mvneta_enaddr_write(sc);
514 	} else {
515 		uint32_t maddrh, maddrl;
516 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
517 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
518 		if (maddrh || maddrl) {
519 			sc->sc_enaddr[0] = maddrh >> 24;
520 			sc->sc_enaddr[1] = maddrh >> 16;
521 			sc->sc_enaddr[2] = maddrh >> 8;
522 			sc->sc_enaddr[3] = maddrh >> 0;
523 			sc->sc_enaddr[4] = maddrl >> 8;
524 			sc->sc_enaddr[5] = maddrl >> 0;
525 		} else
526 			ether_fakeaddr(&sc->sc_ac.ac_if);
527 	}
528 
529 	sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0);
530 
531 	printf("%s: Ethernet address %s\n", self->dv_xname,
532 	    ether_sprintf(sc->sc_enaddr));
533 
534 	/* disable port */
535 	MVNETA_WRITE(sc, MVNETA_PMACC0,
536 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
537 	delay(200);
538 
539 	/* clear all cause registers */
540 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
541 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
542 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
543 
544 	/* mask all interrupts */
545 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
546 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
547 	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
548 	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
549 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
550 
551 	/* enable MBUS Retry bit16 */
552 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
553 
554 	/* enable access for CPU0 */
555 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
556 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
557 
558 	/* reset RX and TX DMAs */
559 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
560 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
561 
562 	/* disable legacy WRR, disable EJP, release from reset */
563 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
564 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
565 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
566 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
567 	}
568 
569 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
570 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
571 
572 	/* set port acceleration mode */
573 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
574 
575 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
576 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
577 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
578 
579 	/* Set SDC register except IPGINT bits */
580 	MVNETA_WRITE(sc, MVNETA_SDC,
581 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
582 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
583 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
584 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
585 
586 	/* XXX: Disable PHY polling in hardware */
587 	MVNETA_WRITE(sc, MVNETA_EUC,
588 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
589 
590 	/* clear uni-/multicast tables */
591 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
592 	memset(dfut, 0, sizeof(dfut));
593 	memset(dfsmt, 0, sizeof(dfut));
594 	memset(dfomt, 0, sizeof(dfut));
595 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
596 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
597 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
598 
599 	MVNETA_WRITE(sc, MVNETA_PIE,
600 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
601 
602 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
603 
604 	/* Setup phy. */
605 	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
606 	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
607 	ctl4 = MVNETA_READ(sc, MVNETA_PMACC4);
608 	panc = MVNETA_READ(sc, MVNETA_PANC);
609 
610 	/* Force link down to change in-band settings. */
611 	panc &= ~MVNETA_PANC_FORCELINKPASS;
612 	panc |= MVNETA_PANC_FORCELINKFAIL;
613 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
614 
615 	mvneta_comphy_init(sc);
616 
617 	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
618 	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
619 	ctl4 &= ~(MVNETA_PMACC4_SHORT_PREAMBLE);
620 	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
621 	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
622 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
623 	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
624 	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);
625 
626 	ctl2 |= MVNETA_PMACC2_RGMIIEN;
627 	switch (sc->sc_phy_mode) {
628 	case PHY_MODE_QSGMII:
629 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
630 		    MVNETA_SERDESCFG_QSGMII_PROTO);
631 		ctl2 |= MVNETA_PMACC2_PCSEN;
632 		break;
633 	case PHY_MODE_SGMII:
634 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
635 		    MVNETA_SERDESCFG_SGMII_PROTO);
636 		ctl2 |= MVNETA_PMACC2_PCSEN;
637 		break;
638 	case PHY_MODE_1000BASEX:
639 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
640 		    MVNETA_SERDESCFG_SGMII_PROTO);
641 		ctl2 |= MVNETA_PMACC2_PCSEN;
642 		break;
643 	case PHY_MODE_2500BASEX:
644 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
645 		    MVNETA_SERDESCFG_HSGMII_PROTO);
646 		ctl2 |= MVNETA_PMACC2_PCSEN;
647 		ctl4 |= MVNETA_PMACC4_SHORT_PREAMBLE;
648 		break;
649 	default:
650 		break;
651 	}
652 
653 	/* Use Auto-Negotiation for Inband Status only */
654 	if (sc->sc_inband_status) {
655 		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
656 		    MVNETA_PANC_FORCELINKPASS);
657 		/* TODO: read mode from SFP */
658 		if (1) {
659 			/* 802.3z */
660 			ctl0 |= MVNETA_PMACC0_PORTTYPE;
661 			panc |= (MVNETA_PANC_INBANDANEN |
662 			    MVNETA_PANC_SETGMIISPEED |
663 			    MVNETA_PANC_SETFULLDX);
664 		} else {
665 			/* SGMII */
666 			ctl2 |= MVNETA_PMACC2_INBANDAN;
667 			panc |= (MVNETA_PANC_INBANDANEN |
668 			    MVNETA_PANC_ANSPEEDEN |
669 			    MVNETA_PANC_ANDUPLEXEN);
670 		}
671 		MVNETA_WRITE(sc, MVNETA_OMSCD,
672 		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
673 	} else {
674 		MVNETA_WRITE(sc, MVNETA_OMSCD,
675 		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
676 	}
677 
678 	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
679 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
680 	MVNETA_WRITE(sc, MVNETA_PMACC4, ctl4);
681 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
682 
683 	/* Port reset */
684 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
685 		;
686 
687 	fdt_intr_establish(faa->fa_node, IPL_NET, mvneta_intr, sc,
688 	    sc->sc_dev.dv_xname);
689 
690 	ifp = &sc->sc_ac.ac_if;
691 	ifp->if_softc = sc;
692 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
693 	ifp->if_start = mvneta_start;
694 	ifp->if_ioctl = mvneta_ioctl;
695 	ifp->if_watchdog = mvneta_watchdog;
696 	ifp->if_capabilities = IFCAP_VLAN_MTU;
697 
698 #if notyet
699 	/*
700 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
701 	 */
702 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
703 				IFCAP_CSUM_UDPv4;
704 
705 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
706 	/*
707 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
708 	 */
709 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
710 #endif
711 
712 	ifq_set_maxlen(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
713 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
714 
715 	/*
716 	 * Do MII setup.
717 	 */
718 	sc->sc_mii.mii_ifp = ifp;
719 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
720 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
721 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
722 
723 	ifmedia_init(&sc->sc_mii.mii_media, 0,
724 	    mvneta_mediachange, mvneta_mediastatus);
725 
726 	config_defer(self, mvneta_attach_deferred);
727 }
728 
/*
 * Deferred part of attach: by now the MDIO bus driver should have
 * attached, so hook up the PHY (or configure fixed-link media) and
 * finally register the interface with the network stack.
 */
void
mvneta_attach_deferred(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (!sc->sc_fixed_link) {
		sc->sc_mdio = mii_byphandle(sc->sc_phy);
		if (sc->sc_mdio == NULL) {
			printf("%s: mdio bus not yet attached\n", self->dv_xname);
			return;
		}

		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
		    MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", self->dv_xname);
			ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	} else {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

		if (sc->sc_inband_status) {
			/* Seed media from phy-mode; in-band statchg refines it. */
			switch (sc->sc_phy_mode) {
			case PHY_MODE_1000BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
				break;
			case PHY_MODE_2500BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
				break;
			default:
				break;
			}
			mvneta_inband_statchg(sc);
		} else {
			/* Plain fixed link: report 1000baseT FDX, always up. */
			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
			mvneta_miibus_statchg(self);
		}

		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
}
785 
786 void
787 mvneta_tick(void *arg)
788 {
789 	struct mvneta_softc *sc = arg;
790 	struct mii_data *mii = &sc->sc_mii;
791 	int s;
792 
793 	s = splnet();
794 	mii_tick(mii);
795 	splx(s);
796 
797 	timeout_add_sec(&sc->sc_tick_ch, 1);
798 }
799 
/*
 * Interrupt handler: service misc (link) events first, then tx and rx
 * completions for queue 0, and restart transmission if packets are
 * still queued.  Always claims the interrupt.
 */
int
mvneta_intr(void *arg)
{
	struct mvneta_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t ic, misc;

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);

	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
		/* Read and acknowledge the misc cause register. */
		misc = MVNETA_READ(sc, MVNETA_PMIC);
		MVNETA_WRITE(sc, MVNETA_PMIC, 0);
		if (sc->sc_inband_status && (misc &
		    (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE |
		    MVNETA_PMI_PSCSYNCCHNG))) {
			mvneta_inband_statchg(sc);
		}
	}

	/* Nothing more to do while the interface is down. */
	if (!(ifp->if_flags & IFF_RUNNING))
		return 1;

	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
		mvneta_tx_proc(sc);

	if (ic & MVNETA_PRXTXTI_RBICTAPQ(0))
		mvneta_rx_proc(sc);

	/* tx_proc may have freed descriptors; try to queue more work. */
	if (!ifq_empty(&ifp->if_snd))
		mvneta_start(ifp);

	return 1;
}
834 
/*
 * ifp->if_start handler: move packets from the send queue into the tx
 * descriptor ring.  Uses the ifq deq_begin/commit/rollback protocol so
 * a packet is only removed from the queue once it is committed to the
 * hardware ring.
 */
void
mvneta_start(struct ifnet *ifp)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	DPRINTFN(3, ("mvneta_start (idx %d)\n", sc->sc_tx_prod));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc))
		return;

	/* Sync the ring before the CPU writes new descriptors into it. */
	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_tx_prod;
	while (sc->sc_tx_cnt < MVNETA_TX_RING_CNT) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvneta_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
898 
/*
 * ifioctl handler; runs at splnet.  An ENETRESET result from flag or
 * ether ioctls is folded into a reprogram of the RX filter via
 * mvneta_iff() when the interface is running.
 */
int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvneta_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvneta_down(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;
	case SIOCGIFSFFPAGE:
		/* Serialized across instances; rw_enter is interruptible. */
		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvneta_sff_lock);
		break;
	default:
		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvneta_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}
956 
957 void
958 mvneta_port_change(struct mvneta_softc *sc)
959 {
960 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
961 		sc->sc_link = !sc->sc_link;
962 
963 		if (sc->sc_link) {
964 			if (!sc->sc_inband_status) {
965 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
966 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
967 				panc |= MVNETA_PANC_FORCELINKPASS;
968 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
969 			}
970 			mvneta_port_up(sc);
971 		} else {
972 			if (!sc->sc_inband_status) {
973 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
974 				panc &= ~MVNETA_PANC_FORCELINKPASS;
975 				panc |= MVNETA_PANC_FORCELINKFAIL;
976 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
977 			}
978 		}
979 	}
980 }
981 
/*
 * Open queue 0 for both directions; called once the link is up.
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
989 
/*
 * Bring the interface up: allocate the Tx/Rx descriptor rings and
 * per-slot DMA maps, program ring addresses/sizes and MAC limits into
 * the controller, load the station address and rx filter, kick the
 * media layer, and unmask the interrupts this driver services.
 *
 * Always returns 0: all allocations use M_WAITOK/BUS_DMA_WAITOK and
 * sleep rather than fail.
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One DMA map per Tx slot; mbufs are attached in mvneta_encap(). */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* Rx buffers are single-segment clusters, filled below. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	/* Ring size plus buffer size in 8-byte units (register layout). */
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	/* If the link is already up, start the queues immediately. */
	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1091 
/*
 * Bring the interface down: stop the Rx/Tx queues and busy-wait
 * (bounded) for in-flight DMA to drain, disable the port, mask and
 * acknowledge interrupts, release all ring resources, and finally
 * reset the Rx/Tx DMA engines.
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	/* Let the last frames leave the MAC before disabling the port. */
	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
1216 
1217 void
1218 mvneta_watchdog(struct ifnet *ifp)
1219 {
1220 	struct mvneta_softc *sc = ifp->if_softc;
1221 
1222 	/*
1223 	 * Reclaim first as there is a possibility of losing Tx completion
1224 	 * interrupts.
1225 	 */
1226 	mvneta_tx_proc(sc);
1227 	if (sc->sc_tx_cnt != 0) {
1228 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1229 
1230 		ifp->if_oerrors++;
1231 	}
1232 }
1233 
1234 /*
1235  * Set media options.
1236  */
1237 int
1238 mvneta_mediachange(struct ifnet *ifp)
1239 {
1240 	struct mvneta_softc *sc = ifp->if_softc;
1241 
1242 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1243 		mii_mediachg(&sc->sc_mii);
1244 
1245 	return (0);
1246 }
1247 
1248 /*
1249  * Report current media status.
1250  */
1251 void
1252 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1253 {
1254 	struct mvneta_softc *sc = ifp->if_softc;
1255 
1256 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1257 		mii_pollstat(&sc->sc_mii);
1258 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1259 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1260 	}
1261 
1262 	if (sc->sc_fixed_link) {
1263 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1264 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1265 	}
1266 }
1267 
/*
 * Map mbuf "m" for DMA and attach it to the Tx ring starting at *idx.
 * On success, *idx is advanced past the descriptors consumed and the
 * chip is told how many new descriptors are pending.  Returns ENOBUFS
 * if the mbuf cannot be mapped or the ring lacks space (two slots are
 * always kept free so producer and consumer never collide).
 */
int
mvneta_encap(struct mvneta_softc *sc, struct mbuf *m, uint32_t *idx)
{
	struct mvneta_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t cmdsts;
	int i, current, first, last;

	DPRINTFN(3, ("mvneta_encap\n"));

	first = last = current = *idx;
	map = sc->sc_txbuf[current].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/* Keep two descriptors spare: refuse if the chain won't fit. */
	if (map->dm_nsegs > (MVNETA_TX_RING_CNT - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	DPRINTFN(2, ("mvneta_encap: dm_nsegs=%d\n", map->dm_nsegs));

	/* No hardware checksum offload yet. */
	cmdsts = MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	/* One descriptor per DMA segment, flagged FIRST/LAST at the ends. */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[current];
		memset(txd, 0, sizeof(*txd));
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->cmdsts = cmdsts |
		    MVNETA_TX_ZERO_PADDING;
		if (i == 0)
		    txd->cmdsts |= MVNETA_TX_FIRST_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->cmdsts |= MVNETA_TX_LAST_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = MVNETA_TX_RING_NEXT(current);
		KASSERT(current != sc->sc_tx_cons);
	}

	/*
	 * Park the loaded map on the LAST slot, where mvneta_tx_proc()
	 * frees the mbuf on completion, and move that slot's spare map
	 * to the first slot so every slot always owns exactly one map.
	 */
	KASSERT(sc->sc_txbuf[last].tb_m == NULL);
	sc->sc_txbuf[first].tb_map = sc->sc_txbuf[last].tb_map;
	sc->sc_txbuf[last].tb_map = map;
	sc->sc_txbuf[last].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = current;

	/* Tell the chip how many new Tx descriptors are ready. */
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), map->dm_nsegs);

	DPRINTFN(3, ("mvneta_encap: completed successfully\n"));

	return 0;
}
1349 
1350 void
1351 mvneta_rx_proc(struct mvneta_softc *sc)
1352 {
1353 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1354 	struct mvneta_rx_desc *rxd;
1355 	struct mvneta_buf *rxb;
1356 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1357 	struct mbuf *m;
1358 	uint32_t rxstat;
1359 	int i, idx, len, ready;
1360 
1361 	DPRINTFN(3, ("%s: %d\n", __func__, sc->sc_rx_cons));
1362 
1363 	if (!(ifp->if_flags & IFF_RUNNING))
1364 		return;
1365 
1366 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring), 0,
1367 	    MVNETA_DMA_LEN(sc->sc_rxring),
1368 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1369 
1370 	ready = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
1371 	MVNETA_WRITE(sc, MVNETA_PRXSU(0), ready);
1372 
1373 	for (i = 0; i < ready; i++) {
1374 		idx = sc->sc_rx_cons;
1375 		KASSERT(idx < MVNETA_RX_RING_CNT);
1376 
1377 		rxd = &sc->sc_rxdesc[idx];
1378 
1379 #ifdef DIAGNOSTIC
1380 		if ((rxd->cmdsts &
1381 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC)) !=
1382 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC))
1383 			panic("%s: buffer size is smaller than packet",
1384 			    __func__);
1385 #endif
1386 
1387 		len = rxd->bytecnt;
1388 		rxb = &sc->sc_rxbuf[idx];
1389 		KASSERT(rxb->tb_m);
1390 
1391 		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
1392 		    len, BUS_DMASYNC_POSTREAD);
1393 		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
1394 
1395 		m = rxb->tb_m;
1396 		rxb->tb_m = NULL;
1397 		m->m_pkthdr.len = m->m_len = len;
1398 
1399 		rxstat = rxd->cmdsts;
1400 		if (rxstat & MVNETA_ERROR_SUMMARY) {
1401 #if 0
1402 			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;
1403 
1404 			if (err == MVNETA_RX_CRC_ERROR)
1405 				ifp->if_ierrors++;
1406 			if (err == MVNETA_RX_OVERRUN_ERROR)
1407 				ifp->if_ierrors++;
1408 			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
1409 				ifp->if_ierrors++;
1410 			if (err == MVNETA_RX_RESOURCE_ERROR)
1411 				ifp->if_ierrors++;
1412 #else
1413 			ifp->if_ierrors++;
1414 #endif
1415 			panic("%s: handle input errors", __func__);
1416 			continue;
1417 		}
1418 
1419 #if notyet
1420 		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
1421 			int flgs = 0;
1422 
1423 			/* Check IPv4 header checksum */
1424 			flgs |= M_CSUM_IPv4;
1425 			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
1426 				flgs |= M_CSUM_IPv4_BAD;
1427 			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
1428 				/*
1429 				 * Check TCPv4/UDPv4 checksum for
1430 				 * non-fragmented packet only.
1431 				 *
1432 				 * It seemd that sometimes
1433 				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
1434 				 * even if the checksum is correct and the
1435 				 * packet was not fragmented. So we don't set
1436 				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
1437 				 */
1438 
1439 				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1440 					MVNETA_RX_L4_TYPE_TCP) &&
1441 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1442 					flgs |= M_CSUM_TCPv4;
1443 				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1444 					MVNETA_RX_L4_TYPE_UDP) &&
1445 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1446 					flgs |= M_CSUM_UDPv4;
1447 			}
1448 			m->m_pkthdr.csum_flags = flgs;
1449 		}
1450 #endif
1451 
1452 		/* Skip on first 2byte (HW header) */
1453 		m_adj(m, MVNETA_HWHEADER_SIZE);
1454 
1455 		ml_enqueue(&ml, m);
1456 
1457 		if_rxr_put(&sc->sc_rx_ring, 1);
1458 
1459 		sc->sc_rx_cons = MVNETA_RX_RING_NEXT(idx);
1460 	}
1461 
1462 	if (ifiq_input(&ifp->if_rcv, &ml))
1463 		if_rxr_livelocked(&sc->sc_rx_ring);
1464 
1465 	mvneta_fill_rx_ring(sc);
1466 }
1467 
/*
 * Transmit-side interrupt work: read how many buffers the chip has
 * sent (PTXS), acknowledge them (PTXSU), unload and free the mbufs
 * attached to the completed descriptors, record error statistics,
 * and advance the consumer index.
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	int i, idx, sent;

	DPRINTFN(3, ("%s\n", __func__));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	sent = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NORB(sent));

	for (i = 0; i < sent; i++) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < MVNETA_TX_RING_CNT);

		txd = &sc->sc_txdesc[idx];
		txb = &sc->sc_txbuf[idx];
		/* Only the LAST descriptor of a chain carries the mbuf. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		/* Descriptors were reclaimed, so transmit may resume. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		sc->sc_tx_cons = MVNETA_TX_RING_NEXT(sc->sc_tx_cons);
	}

	/* Nothing outstanding: disarm the transmit watchdog. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1524 
/*
 * Bitwise CRC-8 over "size" bytes of "data": polynomial 0x07
 * (x^8 + x^2 + x + 1), zero initial value, MSB first, no reflection.
 * Used to hash multicast addresses into the "other" filter table.
 */
uint8_t
mvneta_crc8(const uint8_t *data, size_t size)
{
	const uint8_t poly = 0x07;
	uint8_t crc = 0;
	size_t n;
	int bit;

	for (n = 0; n < size; n++) {
		/* Fold the next byte into the shift register... */
		crc ^= data[n];
		/* ...then clock out its bits, reducing by the polynomial
		 * whenever a one falls out of the top. */
		for (bit = 0; bit < NBBY; bit++) {
			if (crc & 0x80)
				crc = (crc << 1) ^ poly;
			else
				crc <<= 1;
		}
	}

	return crc;
}
1539 
1540 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1541 
/*
 * Program the receive filter: the promiscuous/broadcast bits in PXC
 * and the unicast (DFUT) and multicast (DFSMT/DFOMT) filter tables.
 * Multicasts with the 01:00:5e:00:00:xx prefix go through the
 * "special" table indexed by the last address byte; all other
 * multicasts hash through the "other" table via CRC-8.
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	/* Start from a clean slate: no promisc, empty filter tables. */
	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Accept every multicast: open both tables completely. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1601 
1602 struct mvneta_dmamem *
1603 mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
1604 {
1605 	struct mvneta_dmamem *mdm;
1606 	int nsegs;
1607 
1608 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
1609 	mdm->mdm_size = size;
1610 
1611 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1612 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
1613 		goto mdmfree;
1614 
1615 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
1616 	    &nsegs, BUS_DMA_WAITOK) != 0)
1617 		goto destroy;
1618 
1619 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1620 	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
1621 		goto free;
1622 
1623 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1624 	    NULL, BUS_DMA_WAITOK) != 0)
1625 		goto unmap;
1626 
1627 	bzero(mdm->mdm_kva, size);
1628 
1629 	return (mdm);
1630 
1631 unmap:
1632 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1633 free:
1634 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1635 destroy:
1636 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1637 mdmfree:
1638 	free(mdm, M_DEVBUF, 0);
1639 
1640 	return (NULL);
1641 }
1642 
1643 void
1644 mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
1645 {
1646 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1647 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1648 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1649 	free(mdm, M_DEVBUF, 0);
1650 }
1651 
1652 struct mbuf *
1653 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1654 {
1655 	struct mbuf *m = NULL;
1656 
1657 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1658 	if (!m)
1659 		return (NULL);
1660 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1661 
1662 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1663 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1664 		m_freem(m);
1665 		return (NULL);
1666 	}
1667 
1668 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1669 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1670 
1671 	return (m);
1672 }
1673 
/*
 * Replenish the Rx ring with fresh cluster mbufs, up to the number of
 * slots if_rxr_get() grants, notifying the chip of each new descriptor.
 * Slots left unfilled (mbuf shortage) are handed back to the rxr
 * accounting so they can be retried later.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_RX_RING_CNT);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		memset(rxd, 0, sizeof(*rxd));
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
		    sc->sc_rx_prod * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_PREWRITE);

		sc->sc_rx_prod = MVNETA_RX_RING_NEXT(sc->sc_rx_prod);

		/* Tell the chip there is a new free descriptor. */
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOOFNEWDESCRIPTORS(1));
	}

	if_rxr_put(&sc->sc_rx_ring, slots);
}
1705