xref: /openbsd-src/sys/dev/fdt/if_mvneta.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*	$OpenBSD: if_mvneta.c,v 1.7 2019/04/30 20:26:02 patrick Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 
31 #include <sys/param.h>
32 #include <sys/device.h>
33 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/errno.h>
36 #include <sys/kernel.h>
37 #include <sys/mutex.h>
38 #include <sys/socket.h>
39 #include <sys/sockio.h>
40 #include <uvm/uvm_extern.h>
41 #include <sys/mbuf.h>
42 
43 #include <machine/bus.h>
44 #include <machine/fdt.h>
45 
46 #include <dev/ofw/openfirm.h>
47 #include <dev/ofw/ofw_clock.h>
48 #include <dev/ofw/ofw_pinctrl.h>
49 #include <dev/ofw/fdt.h>
50 
51 #include <dev/fdt/if_mvnetareg.h>
52 #include <dev/fdt/mvmdiovar.h>
53 
54 #ifdef __armv7__
55 #include <armv7/marvell/mvmbusvar.h>
56 #endif
57 
58 #include <net/if.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 
62 #include <net/bpf.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/if_ether.h>
66 
67 #include <dev/mii/mii.h>
68 #include <dev/mii/miivar.h>
69 
70 #if NBPFILTER > 0
71 #include <net/bpf.h>
72 #endif
73 
74 #ifdef MVNETA_DEBUG
75 #define DPRINTF(x)	if (mvneta_debug) printf x
76 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
77 int mvneta_debug = MVNETA_DEBUG;
78 #else
79 #define DPRINTF(x)
80 #define DPRINTFN(n,x)
81 #endif
82 
83 #define MVNETA_READ(sc, reg) \
84 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
85 #define MVNETA_WRITE(sc, reg, val) \
86 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
87 #define MVNETA_READ_FILTER(sc, reg, val, c) \
88 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
89 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
90 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
91 
92 #define MVNETA_LINKUP_READ(sc) \
93 	MVNETA_READ(sc, MVNETA_PS0)
94 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
95 
96 #define MVNETA_TX_RING_CNT	256
97 #define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
98 #define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
99 #define MVNETA_TX_QUEUE_CNT	1
100 #define MVNETA_RX_RING_CNT	256
101 #define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
102 #define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
103 #define MVNETA_RX_QUEUE_CNT	1
104 
105 CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
106 	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
107 CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
108 	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);
109 
110 #define MVNETA_NTXSEG		30
111 
/*
 * A single contiguous DMA-safe allocation: one bus_dma segment together
 * with its load map and kernel-virtual mapping.  Used for the Rx/Tx
 * descriptor rings (see MVNETA_DMA_* accessor macros below).
 */
struct mvneta_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing physical segment */
	size_t			mdm_size;	/* allocation size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
118 #define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
119 #define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
120 #define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
121 #define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
122 
/* Per-descriptor software state: the mbuf in flight and its DMA map. */
struct mvneta_buf {
	bus_dmamap_t	tb_map;	/* DMA map for tb_m */
	struct mbuf	*tb_m;	/* mbuf attached to the descriptor, or NULL */
};
127 
/* Per-device software state. */
struct mvneta_softc {
	struct device sc_dev;
	struct device *sc_mdio;		/* mvmdio(4) controller for PHY access */

	/* Register access and DMA tags from FDT attach args. */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t sc_dmat;

	struct arpcom sc_ac;
#define sc_enaddr sc_ac.ac_enaddr
	struct mii_data sc_mii;
#define sc_media sc_mii.mii_media

	struct timeout sc_tick_ch;	/* periodic mii_tick() callout */

	/* Tx ring state (single queue). */
	struct mvneta_dmamem	*sc_txring;
	struct mvneta_buf	*sc_txbuf;
	struct mvneta_tx_desc	*sc_txdesc;
	int			 sc_tx_prod;	/* next free tx desc */
	int			 sc_tx_cnt;	/* amount of tx sent */
	int			 sc_tx_cons;	/* first tx desc sent */

	/* Rx ring state (single queue). */
	struct mvneta_dmamem	*sc_rxring;
	struct mvneta_buf	*sc_rxbuf;
	struct mvneta_rx_desc	*sc_rxdesc;
	int			 sc_rx_prod;	/* next rx desc to fill */
	struct if_rxring	 sc_rx_ring;
	int			 sc_rx_cons;	/* next rx desc recvd */

	/* PHY connection type parsed from the "phy-mode" FDT property. */
	enum {
		PHY_MODE_QSGMII,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
	}			 sc_phy_mode;
	int			 sc_fixed_link;		/* fixed-link in FDT */
	int			 sc_inband_status;	/* managed=in-band-status */
	int			 sc_phy;		/* PHY address on mdio bus */
	int			 sc_link;		/* cached link-up state */
};
168 
169 
170 int mvneta_miibus_readreg(struct device *, int, int);
171 void mvneta_miibus_writereg(struct device *, int, int, int);
172 void mvneta_miibus_statchg(struct device *);
173 
174 void mvneta_wininit(struct mvneta_softc *);
175 
176 /* Gigabit Ethernet Port part functions */
177 int mvneta_match(struct device *, void *, void *);
178 void mvneta_attach(struct device *, struct device *, void *);
179 void mvneta_attach_deferred(struct device *);
180 
181 void mvneta_tick(void *);
182 int mvneta_intr(void *);
183 
184 void mvneta_start(struct ifnet *);
185 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
186 void mvneta_inband_statchg(struct mvneta_softc *);
187 void mvneta_port_change(struct mvneta_softc *);
188 void mvneta_port_up(struct mvneta_softc *);
189 int mvneta_up(struct mvneta_softc *);
190 void mvneta_down(struct mvneta_softc *);
191 void mvneta_watchdog(struct ifnet *);
192 
193 int mvneta_mediachange(struct ifnet *);
194 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
195 
196 int mvneta_encap(struct mvneta_softc *, struct mbuf *, uint32_t *);
197 void mvneta_rx_proc(struct mvneta_softc *);
198 void mvneta_tx_proc(struct mvneta_softc *);
199 uint8_t mvneta_crc8(const uint8_t *, size_t);
200 void mvneta_iff(struct mvneta_softc *);
201 
202 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
203     bus_size_t, bus_size_t);
204 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
205 void mvneta_fill_rx_ring(struct mvneta_softc *);
206 
/* autoconf(9) glue: driver and attachment descriptors. */
struct cfdriver mvneta_cd = {
	NULL, "mvneta", DV_IFNET
};

struct cfattach mvneta_ca = {
	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
};
214 
215 int
216 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
217 {
218 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
219 	return mvmdio_miibus_readreg(sc->sc_mdio, phy, reg);
220 }
221 
222 void
223 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
224 {
225 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
226 	return mvmdio_miibus_writereg(sc->sc_mdio, phy, reg, val);
227 }
228 
/*
 * MII status-change callback: when the link is active, mirror the
 * negotiated speed and duplex from the MII layer into the port's
 * auto-negotiation control register, then let mvneta_port_change()
 * act on any resulting link transition.
 */
void
mvneta_miibus_statchg(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *)self;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);

		/* Clear forced speed/duplex bits before re-applying. */
		panc &= ~(MVNETA_PANC_SETMIISPEED |
			  MVNETA_PANC_SETGMIISPEED |
			  MVNETA_PANC_SETFULLDX);

		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			panc |= MVNETA_PANC_SETGMIISPEED;
			break;
		case IFM_100_TX:
			panc |= MVNETA_PANC_SETMIISPEED;
			break;
		case IFM_10_T:
			/* 10Mbit: both speed bits stay cleared. */
			break;
		}

		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
			panc |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, panc);
	}

	mvneta_port_change(sc);
}
263 
264 void
265 mvneta_inband_statchg(struct mvneta_softc *sc)
266 {
267 	uint32_t reg;
268 
269 	sc->sc_mii.mii_media_status = IFM_AVALID;
270 	sc->sc_mii.mii_media_active = IFM_ETHER;
271 
272 	reg = MVNETA_READ(sc, MVNETA_PS0);
273 	if (reg & MVNETA_PS0_LINKUP)
274 		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
275 	if (reg & MVNETA_PS0_GMIISPEED)
276 		sc->sc_mii.mii_media_active |= IFM_1000_T;
277 	else if (reg & MVNETA_PS0_MIISPEED)
278 		sc->sc_mii.mii_media_active |= IFM_100_TX;
279 	else
280 		sc->sc_mii.mii_media_active |= IFM_10_T;
281 	if (reg & MVNETA_PS0_FULLDX)
282 		sc->sc_mii.mii_media_active |= IFM_FDX;
283 
284 	mvneta_port_change(sc);
285 }
286 
287 void
288 mvneta_enaddr_write(struct mvneta_softc *sc)
289 {
290 	uint32_t maddrh, maddrl;
291 	maddrh  = sc->sc_enaddr[0] << 24;
292 	maddrh |= sc->sc_enaddr[1] << 16;
293 	maddrh |= sc->sc_enaddr[2] << 8;
294 	maddrh |= sc->sc_enaddr[3];
295 	maddrl  = sc->sc_enaddr[4] << 8;
296 	maddrl |= sc->sc_enaddr[5];
297 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
298 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
299 }
300 
/*
 * Program the MBUS address-decoding windows so the controller's DMA
 * engine can reach every DRAM chip-select (armv7/Armada only; compiled
 * out elsewhere).
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
#ifdef __armv7__
	uint32_t en;
	int i;

	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);

	/* Start clean: zero every window base/size and remap register. */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* All windows disabled until configured below. */
	en = MVNETA_BARE_EN_MASK;

	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		/* Clearing a bit in MVNETA_BARE enables that window. */
		en &= ~(1 << i);
	}

	MVNETA_WRITE(sc, MVNETA_BARE, en);
#endif
}
337 
338 int
339 mvneta_match(struct device *parent, void *cfdata, void *aux)
340 {
341 	struct fdt_attach_args *faa = aux;
342 
343 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
344 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
345 }
346 
347 void
348 mvneta_attach(struct device *parent, struct device *self, void *aux)
349 {
350 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
351 	struct fdt_attach_args *faa = aux;
352 	uint32_t ctl0, ctl2, panc;
353 	struct ifnet *ifp;
354 	int i, len, node;
355 	char *phy_mode;
356 	char *managed;
357 
358 	printf("\n");
359 
360 	sc->sc_iot = faa->fa_iot;
361 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
362 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
363 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
364 		printf("%s: cannot map registers\n", self->dv_xname);
365 		return;
366 	}
367 	sc->sc_dmat = faa->fa_dmat;
368 
369 	clock_enable(faa->fa_node, NULL);
370 
371 	pinctrl_byname(faa->fa_node, "default");
372 
373 	len = OF_getproplen(faa->fa_node, "phy-mode");
374 	if (len <= 0) {
375 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
376 		return;
377 	}
378 
379 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
380 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
381 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
382 		sc->sc_phy_mode = PHY_MODE_QSGMII;
383 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
384 		sc->sc_phy_mode = PHY_MODE_SGMII;
385 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
386 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
387 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
388 		sc->sc_phy_mode = PHY_MODE_RGMII;
389 	else {
390 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
391 		    phy_mode);
392 		return;
393 	}
394 	free(phy_mode, M_TEMP, len);
395 
396 	/* TODO: check child's name to be "fixed-link" */
397 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
398 	    OF_child(faa->fa_node))
399 		sc->sc_fixed_link = 1;
400 
401 	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
402 		managed = malloc(len, M_TEMP, M_WAITOK);
403 		OF_getprop(faa->fa_node, "managed", managed, len);
404 		if (!strncmp(managed, "in-band-status",
405 		    strlen("in-band-status"))) {
406 			sc->sc_fixed_link = 1;
407 			sc->sc_inband_status = 1;
408 		}
409 		free(managed, M_TEMP, len);
410 	}
411 
412 	if (!sc->sc_fixed_link) {
413 		node = OF_getnodebyphandle(OF_getpropint(faa->fa_node,
414 		    "phy", 0));
415 		if (!node) {
416 			printf("%s: cannot find phy in fdt\n", self->dv_xname);
417 			return;
418 		}
419 
420 		if ((sc->sc_phy = OF_getpropint(node, "reg", -1)) == -1) {
421 			printf("%s: cannot extract phy addr\n", self->dv_xname);
422 			return;
423 		}
424 	}
425 
426 	mvneta_wininit(sc);
427 
428 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
429 	    ETHER_ADDR_LEN) {
430 		OF_getprop(faa->fa_node, "local-mac-address",
431 		    sc->sc_enaddr, ETHER_ADDR_LEN);
432 		mvneta_enaddr_write(sc);
433 	} else {
434 		uint32_t maddrh, maddrl;
435 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
436 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
437 		if (maddrh || maddrl) {
438 			sc->sc_enaddr[0] = maddrh >> 24;
439 			sc->sc_enaddr[1] = maddrh >> 16;
440 			sc->sc_enaddr[2] = maddrh >> 8;
441 			sc->sc_enaddr[3] = maddrh >> 0;
442 			sc->sc_enaddr[4] = maddrl >> 8;
443 			sc->sc_enaddr[5] = maddrl >> 0;
444 		} else
445 			ether_fakeaddr(&sc->sc_ac.ac_if);
446 	}
447 
448 	printf("%s: Ethernet address %s\n", self->dv_xname,
449 	    ether_sprintf(sc->sc_enaddr));
450 
451 	/* disable port */
452 	MVNETA_WRITE(sc, MVNETA_PMACC0,
453 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
454 	delay(200);
455 
456 	/* clear all cause registers */
457 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
458 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
459 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
460 
461 	/* mask all interrupts */
462 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
463 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
464 	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
465 	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
466 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
467 
468 	/* enable MBUS Retry bit16 */
469 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
470 
471 	/* enable access for CPU0 */
472 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
473 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
474 
475 	/* reset RX and TX DMAs */
476 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
477 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
478 
479 	/* disable legacy WRR, disable EJP, release from reset */
480 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
481 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
482 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
483 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
484 	}
485 
486 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
487 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
488 
489 	/* set port acceleration mode */
490 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
491 
492 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
493 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
494 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
495 
496 	/* Set SDC register except IPGINT bits */
497 	MVNETA_WRITE(sc, MVNETA_SDC,
498 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
499 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
500 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
501 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
502 
503 	/* XXX: Disable PHY polling in hardware */
504 	MVNETA_WRITE(sc, MVNETA_EUC,
505 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
506 
507 	/* clear uni-/multicast tables */
508 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
509 	memset(dfut, 0, sizeof(dfut));
510 	memset(dfsmt, 0, sizeof(dfut));
511 	memset(dfomt, 0, sizeof(dfut));
512 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
513 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
514 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
515 
516 	MVNETA_WRITE(sc, MVNETA_PIE,
517 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
518 
519 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
520 
521 	/* Setup phy. */
522 	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
523 	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
524 	panc = MVNETA_READ(sc, MVNETA_PANC);
525 
526 	/* Force link down to change in-band settings. */
527 	panc &= ~MVNETA_PANC_FORCELINKPASS;
528 	panc |= MVNETA_PANC_FORCELINKFAIL;
529 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
530 
531 	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
532 	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
533 	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
534 	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
535 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
536 	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
537 	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);
538 
539 	ctl2 |= MVNETA_PMACC2_RGMIIEN;
540 	switch (sc->sc_phy_mode) {
541 	case PHY_MODE_QSGMII:
542 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
543 		    MVNETA_SERDESCFG_QSGMII_PROTO);
544 		ctl2 |= MVNETA_PMACC2_PCSEN;
545 		break;
546 	case PHY_MODE_SGMII:
547 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
548 		    MVNETA_SERDESCFG_SGMII_PROTO);
549 		ctl2 |= MVNETA_PMACC2_PCSEN;
550 		break;
551 	default:
552 		break;
553 	}
554 
555 	/* Use Auto-Negotiation for Inband Status only */
556 	if (sc->sc_inband_status) {
557 		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
558 		    MVNETA_PANC_FORCELINKPASS);
559 		/* TODO: read mode from SFP */
560 		if (1) {
561 			/* 802.3z */
562 			ctl0 |= MVNETA_PMACC0_PORTTYPE;
563 			panc |= (MVNETA_PANC_INBANDANEN |
564 			    MVNETA_PANC_SETGMIISPEED |
565 			    MVNETA_PANC_SETFULLDX);
566 		} else {
567 			/* SGMII */
568 			ctl2 |= MVNETA_PMACC2_INBANDAN;
569 			panc |= (MVNETA_PANC_INBANDANEN |
570 			    MVNETA_PANC_ANSPEEDEN |
571 			    MVNETA_PANC_ANDUPLEXEN);
572 		}
573 		MVNETA_WRITE(sc, MVNETA_OMSCD,
574 		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
575 	} else {
576 		MVNETA_WRITE(sc, MVNETA_OMSCD,
577 		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
578 	}
579 
580 	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
581 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
582 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
583 
584 	/* Port reset */
585 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
586 		;
587 
588 	fdt_intr_establish(faa->fa_node, IPL_NET, mvneta_intr, sc,
589 	    sc->sc_dev.dv_xname);
590 
591 	ifp = &sc->sc_ac.ac_if;
592 	ifp->if_softc = sc;
593 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
594 	ifp->if_start = mvneta_start;
595 	ifp->if_ioctl = mvneta_ioctl;
596 	ifp->if_watchdog = mvneta_watchdog;
597 	ifp->if_capabilities = IFCAP_VLAN_MTU;
598 
599 #if notyet
600 	/*
601 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
602 	 */
603 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
604 				IFCAP_CSUM_UDPv4;
605 
606 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
607 	/*
608 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
609 	 */
610 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
611 #endif
612 
613 	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
614 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
615 
616 	/*
617 	 * Do MII setup.
618 	 */
619 	sc->sc_mii.mii_ifp = ifp;
620 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
621 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
622 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
623 
624 	ifmedia_init(&sc->sc_mii.mii_media, 0,
625 	    mvneta_mediachange, mvneta_mediastatus);
626 
627 	if (!sc->sc_fixed_link) {
628 		extern void *mvmdio_sc;
629 		sc->sc_mdio = mvmdio_sc;
630 
631 		if (sc->sc_mdio == NULL) {
632 			config_defer(self, mvneta_attach_deferred);
633 			return;
634 		}
635 
636 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phy,
637 		    MII_OFFSET_ANY, 0);
638 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
639 			printf("%s: no PHY found!\n", self->dv_xname);
640 			ifmedia_add(&sc->sc_mii.mii_media,
641 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
642 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
643 		} else
644 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
645 	} else {
646 		ifmedia_add(&sc->sc_mii.mii_media,
647 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
648 		ifmedia_set(&sc->sc_mii.mii_media,
649 		    IFM_ETHER|IFM_MANUAL);
650 
651 		if (sc->sc_inband_status) {
652 			mvneta_inband_statchg(sc);
653 		} else {
654 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
655 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
656 			mvneta_miibus_statchg(self);
657 		}
658 
659 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
660 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
661 	}
662 
663 	/*
664 	 * Call MI attach routines.
665 	 */
666 	if_attach(ifp);
667 	ether_ifattach(ifp);
668 
669 	return;
670 }
671 
672 void
673 mvneta_attach_deferred(struct device *self)
674 {
675 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
676 	struct ifnet *ifp = &sc->sc_ac.ac_if;
677 
678 	extern void *mvmdio_sc;
679 	sc->sc_mdio = mvmdio_sc;
680 	if (sc->sc_mdio == NULL) {
681 		printf("%s: mdio bus not yet attached\n", self->dv_xname);
682 		return;
683 	}
684 
685 	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phy,
686 	    MII_OFFSET_ANY, 0);
687 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
688 		printf("%s: no PHY found!\n", self->dv_xname);
689 		ifmedia_add(&sc->sc_mii.mii_media,
690 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
691 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
692 	} else
693 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
694 
695 	/*
696 	 * Call MI attach routines.
697 	 */
698 	if_attach(ifp);
699 	ether_ifattach(ifp);
700 
701 	return;
702 }
703 
704 void
705 mvneta_tick(void *arg)
706 {
707 	struct mvneta_softc *sc = arg;
708 	struct mii_data *mii = &sc->sc_mii;
709 	int s;
710 
711 	s = splnet();
712 	mii_tick(mii);
713 	splx(s);
714 
715 	timeout_add_sec(&sc->sc_tick_ch, 1);
716 }
717 
718 int
719 mvneta_intr(void *arg)
720 {
721 	struct mvneta_softc *sc = arg;
722 	struct ifnet *ifp = &sc->sc_ac.ac_if;
723 	uint32_t ic, misc;
724 
725 	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
726 
727 	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
728 		misc = MVNETA_READ(sc, MVNETA_PMIC);
729 		MVNETA_WRITE(sc, MVNETA_PMIC, 0);
730 		if (sc->sc_inband_status && (misc &
731 		    (MVNETA_PMI_PHYSTATUSCHNG |
732 		    MVNETA_PMI_LINKCHANGE |
733 		    MVNETA_PMI_PSCSYNCCHNG))) {
734 			mvneta_inband_statchg(sc);
735 		}
736 	}
737 
738 	if (!(ifp->if_flags & IFF_RUNNING))
739 		return 1;
740 
741 	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
742 		mvneta_tx_proc(sc);
743 
744 	if (ic & MVNETA_PRXTXTI_RBICTAPQ(0))
745 		mvneta_rx_proc(sc);
746 
747 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
748 		mvneta_start(ifp);
749 
750 	return 1;
751 }
752 
/*
 * Transmit start routine: drain the interface send queue into the Tx
 * descriptor ring.  Uses the ifq_deq_begin/commit/rollback protocol so
 * a packet that does not fit stays at the head of the queue.
 */
void
mvneta_start(struct ifnet *ifp)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	DPRINTFN(3, ("mvneta_start (idx %d)\n", sc->sc_tx_prod));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc))
		return;

	/* Sync the whole ring before the CPU touches descriptors. */
	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_tx_prod;
	while (sc->sc_tx_cnt < MVNETA_TX_RING_CNT) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvneta_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
816 
817 int
818 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
819 {
820 	struct mvneta_softc *sc = ifp->if_softc;
821 	struct ifreq *ifr = (struct ifreq *)addr;
822 	int s, error = 0;
823 
824 	s = splnet();
825 
826 	switch (cmd) {
827 	case SIOCSIFADDR:
828 		ifp->if_flags |= IFF_UP;
829 		/* FALLTHROUGH */
830 	case SIOCSIFFLAGS:
831 		if (ifp->if_flags & IFF_UP) {
832 			if (ifp->if_flags & IFF_RUNNING)
833 				error = ENETRESET;
834 			else
835 				mvneta_up(sc);
836 		} else {
837 			if (ifp->if_flags & IFF_RUNNING)
838 				mvneta_down(sc);
839 		}
840 		break;
841 	case SIOCGIFMEDIA:
842 	case SIOCSIFMEDIA:
843 		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
844 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
845 		break;
846 	case SIOCGIFRXR:
847 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
848 		    NULL, MCLBYTES, &sc->sc_rx_ring);
849 		break;
850 	default:
851 		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
852 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
853 		break;
854 	}
855 
856 	if (error == ENETRESET) {
857 		if (ifp->if_flags & IFF_RUNNING)
858 			mvneta_iff(sc);
859 		error = 0;
860 	}
861 
862 	splx(s);
863 
864 	return error;
865 }
866 
867 void
868 mvneta_port_change(struct mvneta_softc *sc)
869 {
870 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
871 		sc->sc_link = !sc->sc_link;
872 
873 		if (sc->sc_link) {
874 			if (!sc->sc_inband_status) {
875 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
876 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
877 				panc |= MVNETA_PANC_FORCELINKPASS;
878 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
879 			}
880 			mvneta_port_up(sc);
881 		} else {
882 			if (!sc->sc_inband_status) {
883 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
884 				panc &= ~MVNETA_PANC_FORCELINKPASS;
885 				panc |= MVNETA_PANC_FORCELINKFAIL;
886 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
887 			}
888 		}
889 	}
890 }
891 
892 void
893 mvneta_port_up(struct mvneta_softc *sc)
894 {
895 	/* Enable port RX/TX. */
896 	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
897 	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
898 }
899 
/*
 * Bring the interface up: allocate and program the Tx/Rx descriptor
 * rings and their DMA maps, configure frame-size/MTU limits, enable
 * the port and its interrupts, and start the tick callout.
 * Returns 0 (allocations use M_WAITOK/BUS_DMA_WAITOK and cannot fail
 * here).
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One multi-segment DMA map per Tx slot; no mbuf attached yet. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* Single-segment maps for Rx: one cluster per descriptor. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1001 
/*
 * Stop the interface: quiesce RX/TX DMA, disable the MAC port, mask and
 * ack all interrupts, release every mbuf and DMA resource owned by the
 * rings, and finally reset both DMA engines.  Counterpart of mvneta_up().
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	/* Stop the periodic MII tick before tearing anything down. */
	timeout_del(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	/* Port-status bits we must see clear/set for queue 0 to be idle. */
	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check: re-read once more; the FIFO may refill. */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	/* Let any in-flight transactions on the bus settle. */
	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		/* Maps were created in mvneta_up(); destroy unconditionally. */
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
1126 
1127 void
1128 mvneta_watchdog(struct ifnet *ifp)
1129 {
1130 	struct mvneta_softc *sc = ifp->if_softc;
1131 
1132 	/*
1133 	 * Reclaim first as there is a possibility of losing Tx completion
1134 	 * interrupts.
1135 	 */
1136 	mvneta_tx_proc(sc);
1137 	if (sc->sc_tx_cnt != 0) {
1138 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1139 
1140 		ifp->if_oerrors++;
1141 	}
1142 }
1143 
1144 /*
1145  * Set media options.
1146  */
1147 int
1148 mvneta_mediachange(struct ifnet *ifp)
1149 {
1150 	struct mvneta_softc *sc = ifp->if_softc;
1151 
1152 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1153 		mii_mediachg(&sc->sc_mii);
1154 
1155 	return (0);
1156 }
1157 
1158 /*
1159  * Report current media status.
1160  */
1161 void
1162 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1163 {
1164 	struct mvneta_softc *sc = ifp->if_softc;
1165 
1166 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1167 		mii_pollstat(&sc->sc_mii);
1168 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1169 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1170 	}
1171 
1172 	if (sc->sc_fixed_link) {
1173 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1174 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1175 	}
1176 }
1177 
/*
 * Map mbuf chain m for DMA and enqueue it on the Tx ring starting at
 * *idx.  On success, advances *idx past the consumed descriptors,
 * charges sc_tx_cnt, and kicks the hardware.  Returns ENOBUFS if the
 * mbuf cannot be mapped or the ring lacks room; the caller keeps
 * ownership of m on failure.
 */
int
mvneta_encap(struct mvneta_softc *sc, struct mbuf *m, uint32_t *idx)
{
	struct mvneta_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t cmdsts;
	int i, current, first, last;

	DPRINTFN(3, ("mvneta_encap\n"));

	first = last = current = *idx;
	map = sc->sc_txbuf[current].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/* Keep 2 descriptors of slack so producer never catches consumer. */
	if (map->dm_nsegs > (MVNETA_TX_RING_CNT - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	DPRINTFN(2, ("mvneta_encap: dm_nsegs=%d\n", map->dm_nsegs));

	/* Checksum offload not wired up yet; tell hw not to touch L4. */
	cmdsts = MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	/* One descriptor per DMA segment; flag the first and last. */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[current];
		memset(txd, 0, sizeof(*txd));
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->cmdsts = cmdsts |
		    MVNETA_TX_ZERO_PADDING;
		if (i == 0)
		    txd->cmdsts |= MVNETA_TX_FIRST_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->cmdsts |= MVNETA_TX_LAST_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = MVNETA_TX_RING_NEXT(current);
		KASSERT(current != sc->sc_tx_cons);
	}

	/*
	 * Swap maps: the mbuf is anchored at the LAST descriptor (freed
	 * when the whole chain completes), so give the first slot the
	 * spare map that was sitting in the last slot.
	 */
	KASSERT(sc->sc_txbuf[last].tb_m == NULL);
	sc->sc_txbuf[first].tb_map = sc->sc_txbuf[last].tb_map;
	sc->sc_txbuf[last].tb_map = map;
	sc->sc_txbuf[last].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = current;

	/* Let him know we sent another packet. */
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), map->dm_nsegs);

	DPRINTFN(3, ("mvneta_encap: completed successfully\n"));

	return 0;
}
1259 
1260 void
1261 mvneta_rx_proc(struct mvneta_softc *sc)
1262 {
1263 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1264 	struct mvneta_rx_desc *rxd;
1265 	struct mvneta_buf *rxb;
1266 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1267 	struct mbuf *m;
1268 	uint32_t rxstat;
1269 	int i, idx, len, ready;
1270 
1271 	DPRINTFN(3, ("%s: %d\n", __func__, sc->sc_rx_cons));
1272 
1273 	if (!(ifp->if_flags & IFF_RUNNING))
1274 		return;
1275 
1276 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring), 0,
1277 	    MVNETA_DMA_LEN(sc->sc_rxring),
1278 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1279 
1280 	ready = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
1281 	MVNETA_WRITE(sc, MVNETA_PRXSU(0), ready);
1282 
1283 	for (i = 0; i < ready; i++) {
1284 		idx = sc->sc_rx_cons;
1285 		KASSERT(idx < MVNETA_RX_RING_CNT);
1286 
1287 		rxd = &sc->sc_rxdesc[idx];
1288 
1289 #ifdef DIAGNOSTIC
1290 		if ((rxd->cmdsts &
1291 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC)) !=
1292 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC))
1293 			panic("%s: buffer size is smaller than packet",
1294 			    __func__);
1295 #endif
1296 
1297 		len = rxd->bytecnt;
1298 		rxb = &sc->sc_rxbuf[idx];
1299 		KASSERT(rxb->tb_m);
1300 
1301 		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
1302 		    len, BUS_DMASYNC_POSTREAD);
1303 		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
1304 
1305 		m = rxb->tb_m;
1306 		rxb->tb_m = NULL;
1307 		m->m_pkthdr.len = m->m_len = len;
1308 
1309 		rxstat = rxd->cmdsts;
1310 		if (rxstat & MVNETA_ERROR_SUMMARY) {
1311 #if 0
1312 			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;
1313 
1314 			if (err == MVNETA_RX_CRC_ERROR)
1315 				ifp->if_ierrors++;
1316 			if (err == MVNETA_RX_OVERRUN_ERROR)
1317 				ifp->if_ierrors++;
1318 			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
1319 				ifp->if_ierrors++;
1320 			if (err == MVNETA_RX_RESOURCE_ERROR)
1321 				ifp->if_ierrors++;
1322 #else
1323 			ifp->if_ierrors++;
1324 #endif
1325 			panic("%s: handle input errors", __func__);
1326 			continue;
1327 		}
1328 
1329 #if notyet
1330 		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
1331 			int flgs = 0;
1332 
1333 			/* Check IPv4 header checksum */
1334 			flgs |= M_CSUM_IPv4;
1335 			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
1336 				flgs |= M_CSUM_IPv4_BAD;
1337 			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
1338 				/*
1339 				 * Check TCPv4/UDPv4 checksum for
1340 				 * non-fragmented packet only.
1341 				 *
1342 				 * It seemd that sometimes
1343 				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
1344 				 * even if the checksum is correct and the
1345 				 * packet was not fragmented. So we don't set
1346 				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
1347 				 */
1348 
1349 				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1350 					MVNETA_RX_L4_TYPE_TCP) &&
1351 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1352 					flgs |= M_CSUM_TCPv4;
1353 				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1354 					MVNETA_RX_L4_TYPE_UDP) &&
1355 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1356 					flgs |= M_CSUM_UDPv4;
1357 			}
1358 			m->m_pkthdr.csum_flags = flgs;
1359 		}
1360 #endif
1361 
1362 		/* Skip on first 2byte (HW header) */
1363 		m_adj(m, MVNETA_HWHEADER_SIZE);
1364 
1365 		ml_enqueue(&ml, m);
1366 
1367 		if_rxr_put(&sc->sc_rx_ring, 1);
1368 
1369 		sc->sc_rx_cons = MVNETA_RX_RING_NEXT(idx);
1370 	}
1371 
1372 	mvneta_fill_rx_ring(sc);
1373 
1374 	if_input(ifp, &ml);
1375 }
1376 
/*
 * Transmit completion processing: reclaim descriptors the hardware has
 * finished with on Tx queue 0, free the associated mbufs, record any
 * transmit errors, and clear the output-queue backpressure.
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	int i, idx, sent;

	DPRINTFN(3, ("%s\n", __func__));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	/* How many descriptors the hw transmitted; hand them back to it. */
	sent = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NORB(sent));

	for (i = 0; i < sent; i++) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < MVNETA_TX_RING_CNT);

		txd = &sc->sc_txdesc[idx];
		txb = &sc->sc_txbuf[idx];
		/* Only the last descriptor of a chain carries the mbuf. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		/* Descriptors were freed; the ifq may transmit again. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		sc->sc_tx_cons = MVNETA_TX_RING_NEXT(sc->sc_tx_cons);
	}

	/* Ring fully drained: cancel the watchdog timer. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1433 
1434 uint8_t
1435 mvneta_crc8(const uint8_t *data, size_t size)
1436 {
1437 	int bit;
1438 	uint8_t byte;
1439 	uint8_t crc = 0;
1440 	const uint8_t poly = 0x07;
1441 
1442 	while(size--)
1443 	  for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
1444 	    crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
1445 
1446 	return crc;
1447 }
1448 
1449 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1450 
/*
 * Program the receive filter: promiscuous/allmulti mode bits in PXC,
 * the unicast filter table (DFUT, keyed on the low nibble of our MAC),
 * and the two multicast tables (DFSMT for "special" 01:00:5e:00:00:xx
 * IPv4 addresses keyed on the last byte, DFOMT for everything else
 * keyed on a CRC-8 of the address).  Each table entry packs four
 * per-address filter bytes into one 32-bit word.
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	/* Prefix the chip filters specially: IPv4 multicast 01:00:5e:00:00:xx. */
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	/* Start from a clean slate: clear mode bits and all filter tables. */
	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Ranges can't be hashed; fall back to accepting all mcast. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1510 
1511 struct mvneta_dmamem *
1512 mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
1513 {
1514 	struct mvneta_dmamem *mdm;
1515 	int nsegs;
1516 
1517 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
1518 	mdm->mdm_size = size;
1519 
1520 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1521 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
1522 		goto mdmfree;
1523 
1524 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
1525 	    &nsegs, BUS_DMA_WAITOK) != 0)
1526 		goto destroy;
1527 
1528 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1529 	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
1530 		goto free;
1531 
1532 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1533 	    NULL, BUS_DMA_WAITOK) != 0)
1534 		goto unmap;
1535 
1536 	bzero(mdm->mdm_kva, size);
1537 
1538 	return (mdm);
1539 
1540 unmap:
1541 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1542 free:
1543 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1544 destroy:
1545 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1546 mdmfree:
1547 	free(mdm, M_DEVBUF, 0);
1548 
1549 	return (NULL);
1550 }
1551 
1552 void
1553 mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
1554 {
1555 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1556 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1557 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1558 	free(mdm, M_DEVBUF, 0);
1559 }
1560 
1561 struct mbuf *
1562 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1563 {
1564 	struct mbuf *m = NULL;
1565 
1566 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1567 	if (!m)
1568 		return (NULL);
1569 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1570 
1571 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1572 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1573 		m_freem(m);
1574 		return (NULL);
1575 	}
1576 
1577 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1578 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1579 
1580 	return (m);
1581 }
1582 
1583 void
1584 mvneta_fill_rx_ring(struct mvneta_softc *sc)
1585 {
1586 	struct mvneta_rx_desc *rxd;
1587 	struct mvneta_buf *rxb;
1588 	u_int slots;
1589 
1590 	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_RX_RING_CNT);
1591 	    slots > 0; slots--) {
1592 		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
1593 		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
1594 		if (rxb->tb_m == NULL)
1595 			break;
1596 
1597 		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
1598 		memset(rxd, 0, sizeof(*rxd));
1599 		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;
1600 
1601 		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
1602 		    sc->sc_rx_prod * sizeof(*rxd), sizeof(*rxd),
1603 		    BUS_DMASYNC_PREWRITE);
1604 
1605 		sc->sc_rx_prod = MVNETA_RX_RING_NEXT(sc->sc_rx_prod);
1606 
1607 		/* Tell him that there's a new free desc. */
1608 		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
1609 		    MVNETA_PRXSU_NOOFNEWDESCRIPTORS(1));
1610 	}
1611 
1612 	if_rxr_put(&sc->sc_rx_ring, slots);
1613 }
1614