xref: /openbsd-src/sys/dev/fdt/if_mvneta.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: if_mvneta.c,v 1.8 2019/09/07 13:33:00 patrick Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 
31 #include <sys/param.h>
32 #include <sys/device.h>
33 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/errno.h>
36 #include <sys/kernel.h>
37 #include <sys/mutex.h>
38 #include <sys/socket.h>
39 #include <sys/sockio.h>
40 #include <uvm/uvm_extern.h>
41 #include <sys/mbuf.h>
42 
43 #include <machine/bus.h>
44 #include <machine/fdt.h>
45 
46 #include <dev/ofw/openfirm.h>
47 #include <dev/ofw/ofw_clock.h>
48 #include <dev/ofw/ofw_misc.h>
49 #include <dev/ofw/ofw_pinctrl.h>
50 #include <dev/ofw/fdt.h>
51 
52 #include <dev/fdt/if_mvnetareg.h>
53 #include <dev/fdt/mvmdiovar.h>
54 
55 #ifdef __armv7__
56 #include <armv7/marvell/mvmbusvar.h>
57 #endif
58 
59 #include <net/if.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 
63 #include <net/bpf.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/if_ether.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
75 #ifdef MVNETA_DEBUG
76 #define DPRINTF(x)	if (mvneta_debug) printf x
77 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
78 int mvneta_debug = MVNETA_DEBUG;
79 #else
80 #define DPRINTF(x)
81 #define DPRINTFN(n,x)
82 #endif
83 
84 #define MVNETA_READ(sc, reg) \
85 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
86 #define MVNETA_WRITE(sc, reg, val) \
87 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
88 #define MVNETA_READ_FILTER(sc, reg, val, c) \
89 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
90 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
91 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
92 
93 #define MVNETA_LINKUP_READ(sc) \
94 	MVNETA_READ(sc, MVNETA_PS0)
95 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
96 
97 #define MVNETA_TX_RING_CNT	256
98 #define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
99 #define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
100 #define MVNETA_TX_QUEUE_CNT	1
101 #define MVNETA_RX_RING_CNT	256
102 #define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
103 #define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
104 #define MVNETA_RX_QUEUE_CNT	1
105 
106 CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
107 	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
108 CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
109 	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);
110 
111 #define MVNETA_NTXSEG		30
112 
/*
 * A chunk of DMA-able memory: the bus_dma map/segment, its byte size
 * and its kernel virtual mapping.  Used for the Rx/Tx descriptor rings.
 */
struct mvneta_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* Accessors: DMA map, byte length, device (DMA) address, kernel address. */
#define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)

/* Per-descriptor software state: the DMA map and the attached mbuf. */
struct mvneta_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};
128 
/*
 * Per-instance driver state.
 */
struct mvneta_softc {
	struct device sc_dev;
	struct device *sc_mdio;		/* MDIO controller (mvmdio) */

	bus_space_tag_t sc_iot;		/* register space tag/handle */
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t sc_dmat;

	struct arpcom sc_ac;		/* MI ethernet state */
#define sc_enaddr sc_ac.ac_enaddr
	struct mii_data sc_mii;
#define sc_media sc_mii.mii_media

	struct timeout sc_tick_ch;	/* 1 Hz PHY tick */

	struct mvneta_dmamem	*sc_txring;	/* Tx descriptor ring memory */
	struct mvneta_buf	*sc_txbuf;	/* per-slot Tx software state */
	struct mvneta_tx_desc	*sc_txdesc;	/* KVA of Tx descriptors */
	int			 sc_tx_prod;	/* next free tx desc */
	int			 sc_tx_cnt;	/* amount of tx sent */
	int			 sc_tx_cons;	/* first tx desc sent */

	struct mvneta_dmamem	*sc_rxring;	/* Rx descriptor ring memory */
	struct mvneta_buf	*sc_rxbuf;	/* per-slot Rx software state */
	struct mvneta_rx_desc	*sc_rxdesc;	/* KVA of Rx descriptors */
	int			 sc_rx_prod;	/* next rx desc to fill */
	struct if_rxring	 sc_rx_ring;	/* MI rx ring accounting */
	int			 sc_rx_cons;	/* next rx desc recvd */

	/* PHY connection type, parsed from the "phy-mode" FDT property. */
	enum {
		PHY_MODE_QSGMII,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
	}			 sc_phy_mode;
	int			 sc_fixed_link;	  /* fixed-link FDT setup */
	int			 sc_inband_status; /* managed=in-band-status */
	int			 sc_phy;	  /* PHY address on MDIO bus */
	int			 sc_link;	  /* cached link-up state */
	int			 sc_sfp;	  /* "sfp" phandle, 0 if none */
};
170 
171 
172 int mvneta_miibus_readreg(struct device *, int, int);
173 void mvneta_miibus_writereg(struct device *, int, int, int);
174 void mvneta_miibus_statchg(struct device *);
175 
176 void mvneta_wininit(struct mvneta_softc *);
177 
178 /* Gigabit Ethernet Port part functions */
179 int mvneta_match(struct device *, void *, void *);
180 void mvneta_attach(struct device *, struct device *, void *);
181 void mvneta_attach_deferred(struct device *);
182 
183 void mvneta_tick(void *);
184 int mvneta_intr(void *);
185 
186 void mvneta_start(struct ifnet *);
187 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
188 void mvneta_inband_statchg(struct mvneta_softc *);
189 void mvneta_port_change(struct mvneta_softc *);
190 void mvneta_port_up(struct mvneta_softc *);
191 int mvneta_up(struct mvneta_softc *);
192 void mvneta_down(struct mvneta_softc *);
193 void mvneta_watchdog(struct ifnet *);
194 
195 int mvneta_mediachange(struct ifnet *);
196 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
197 
198 int mvneta_encap(struct mvneta_softc *, struct mbuf *, uint32_t *);
199 void mvneta_rx_proc(struct mvneta_softc *);
200 void mvneta_tx_proc(struct mvneta_softc *);
201 uint8_t mvneta_crc8(const uint8_t *, size_t);
202 void mvneta_iff(struct mvneta_softc *);
203 
204 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
205     bus_size_t, bus_size_t);
206 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
207 void mvneta_fill_rx_ring(struct mvneta_softc *);
208 
209 static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");
210 
/* Autoconf glue: driver class (network interface) and attachment entry. */
struct cfdriver mvneta_cd = {
	NULL, "mvneta", DV_IFNET
};

struct cfattach mvneta_ca = {
	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
};
218 
219 int
220 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
221 {
222 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
223 	return mvmdio_miibus_readreg(sc->sc_mdio, phy, reg);
224 }
225 
226 void
227 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
228 {
229 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
230 	return mvmdio_miibus_writereg(sc->sc_mdio, phy, reg, val);
231 }
232 
233 void
234 mvneta_miibus_statchg(struct device *self)
235 {
236 	struct mvneta_softc *sc = (struct mvneta_softc *)self;
237 
238 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
239 		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
240 
241 		panc &= ~(MVNETA_PANC_SETMIISPEED |
242 			  MVNETA_PANC_SETGMIISPEED |
243 			  MVNETA_PANC_SETFULLDX);
244 
245 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
246 		case IFM_1000_SX:
247 		case IFM_1000_LX:
248 		case IFM_1000_CX:
249 		case IFM_1000_T:
250 			panc |= MVNETA_PANC_SETGMIISPEED;
251 			break;
252 		case IFM_100_TX:
253 			panc |= MVNETA_PANC_SETMIISPEED;
254 			break;
255 		case IFM_10_T:
256 			break;
257 		}
258 
259 		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
260 			panc |= MVNETA_PANC_SETFULLDX;
261 
262 		MVNETA_WRITE(sc, MVNETA_PANC, panc);
263 	}
264 
265 	mvneta_port_change(sc);
266 }
267 
268 void
269 mvneta_inband_statchg(struct mvneta_softc *sc)
270 {
271 	uint32_t reg;
272 
273 	sc->sc_mii.mii_media_status = IFM_AVALID;
274 	sc->sc_mii.mii_media_active = IFM_ETHER;
275 
276 	reg = MVNETA_READ(sc, MVNETA_PS0);
277 	if (reg & MVNETA_PS0_LINKUP)
278 		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
279 	if (reg & MVNETA_PS0_GMIISPEED)
280 		sc->sc_mii.mii_media_active |= IFM_1000_T;
281 	else if (reg & MVNETA_PS0_MIISPEED)
282 		sc->sc_mii.mii_media_active |= IFM_100_TX;
283 	else
284 		sc->sc_mii.mii_media_active |= IFM_10_T;
285 	if (reg & MVNETA_PS0_FULLDX)
286 		sc->sc_mii.mii_media_active |= IFM_FDX;
287 
288 	mvneta_port_change(sc);
289 }
290 
291 void
292 mvneta_enaddr_write(struct mvneta_softc *sc)
293 {
294 	uint32_t maddrh, maddrl;
295 	maddrh  = sc->sc_enaddr[0] << 24;
296 	maddrh |= sc->sc_enaddr[1] << 16;
297 	maddrh |= sc->sc_enaddr[2] << 8;
298 	maddrh |= sc->sc_enaddr[3];
299 	maddrl  = sc->sc_enaddr[4] << 8;
300 	maddrl |= sc->sc_enaddr[5];
301 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
302 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
303 }
304 
/*
 * Program the MBUS address-decoding windows so the controller's DMA
 * engine can reach DRAM.  Only compiled on armv7, where mvmbus
 * provides the DRAM chip-select layout; elsewhere this is a no-op.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
#ifdef __armv7__
	uint32_t en;
	int i;

	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);

	/* Clear every window (and remap register where one exists). */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* Start with all windows disabled. */
	en = MVNETA_BARE_EN_MASK;

	/* One window per DRAM chip select; clear its disable bit. */
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}

	MVNETA_WRITE(sc, MVNETA_BARE, en);
#endif
}
341 
342 int
343 mvneta_match(struct device *parent, void *cfdata, void *aux)
344 {
345 	struct fdt_attach_args *faa = aux;
346 
347 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
348 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
349 }
350 
351 void
352 mvneta_attach(struct device *parent, struct device *self, void *aux)
353 {
354 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
355 	struct fdt_attach_args *faa = aux;
356 	uint32_t ctl0, ctl2, panc;
357 	struct ifnet *ifp;
358 	int i, len, node;
359 	char *phy_mode;
360 	char *managed;
361 
362 	printf("\n");
363 
364 	sc->sc_iot = faa->fa_iot;
365 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
366 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
367 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
368 		printf("%s: cannot map registers\n", self->dv_xname);
369 		return;
370 	}
371 	sc->sc_dmat = faa->fa_dmat;
372 
373 	clock_enable(faa->fa_node, NULL);
374 
375 	pinctrl_byname(faa->fa_node, "default");
376 
377 	len = OF_getproplen(faa->fa_node, "phy-mode");
378 	if (len <= 0) {
379 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
380 		return;
381 	}
382 
383 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
384 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
385 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
386 		sc->sc_phy_mode = PHY_MODE_QSGMII;
387 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
388 		sc->sc_phy_mode = PHY_MODE_SGMII;
389 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
390 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
391 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
392 		sc->sc_phy_mode = PHY_MODE_RGMII;
393 	else {
394 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
395 		    phy_mode);
396 		return;
397 	}
398 	free(phy_mode, M_TEMP, len);
399 
400 	/* TODO: check child's name to be "fixed-link" */
401 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
402 	    OF_child(faa->fa_node))
403 		sc->sc_fixed_link = 1;
404 
405 	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
406 		managed = malloc(len, M_TEMP, M_WAITOK);
407 		OF_getprop(faa->fa_node, "managed", managed, len);
408 		if (!strncmp(managed, "in-band-status",
409 		    strlen("in-band-status"))) {
410 			sc->sc_fixed_link = 1;
411 			sc->sc_inband_status = 1;
412 		}
413 		free(managed, M_TEMP, len);
414 	}
415 
416 	if (!sc->sc_fixed_link) {
417 		node = OF_getnodebyphandle(OF_getpropint(faa->fa_node,
418 		    "phy", 0));
419 		if (!node) {
420 			printf("%s: cannot find phy in fdt\n", self->dv_xname);
421 			return;
422 		}
423 
424 		if ((sc->sc_phy = OF_getpropint(node, "reg", -1)) == -1) {
425 			printf("%s: cannot extract phy addr\n", self->dv_xname);
426 			return;
427 		}
428 	}
429 
430 	mvneta_wininit(sc);
431 
432 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
433 	    ETHER_ADDR_LEN) {
434 		OF_getprop(faa->fa_node, "local-mac-address",
435 		    sc->sc_enaddr, ETHER_ADDR_LEN);
436 		mvneta_enaddr_write(sc);
437 	} else {
438 		uint32_t maddrh, maddrl;
439 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
440 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
441 		if (maddrh || maddrl) {
442 			sc->sc_enaddr[0] = maddrh >> 24;
443 			sc->sc_enaddr[1] = maddrh >> 16;
444 			sc->sc_enaddr[2] = maddrh >> 8;
445 			sc->sc_enaddr[3] = maddrh >> 0;
446 			sc->sc_enaddr[4] = maddrl >> 8;
447 			sc->sc_enaddr[5] = maddrl >> 0;
448 		} else
449 			ether_fakeaddr(&sc->sc_ac.ac_if);
450 	}
451 
452 	sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0);
453 
454 	printf("%s: Ethernet address %s\n", self->dv_xname,
455 	    ether_sprintf(sc->sc_enaddr));
456 
457 	/* disable port */
458 	MVNETA_WRITE(sc, MVNETA_PMACC0,
459 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
460 	delay(200);
461 
462 	/* clear all cause registers */
463 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
464 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
465 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
466 
467 	/* mask all interrupts */
468 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
469 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
470 	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
471 	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
472 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
473 
474 	/* enable MBUS Retry bit16 */
475 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
476 
477 	/* enable access for CPU0 */
478 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
479 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
480 
481 	/* reset RX and TX DMAs */
482 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
483 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
484 
485 	/* disable legacy WRR, disable EJP, release from reset */
486 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
487 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
488 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
489 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
490 	}
491 
492 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
493 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
494 
495 	/* set port acceleration mode */
496 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
497 
498 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
499 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
500 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
501 
502 	/* Set SDC register except IPGINT bits */
503 	MVNETA_WRITE(sc, MVNETA_SDC,
504 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
505 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
506 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
507 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
508 
509 	/* XXX: Disable PHY polling in hardware */
510 	MVNETA_WRITE(sc, MVNETA_EUC,
511 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
512 
513 	/* clear uni-/multicast tables */
514 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
515 	memset(dfut, 0, sizeof(dfut));
516 	memset(dfsmt, 0, sizeof(dfut));
517 	memset(dfomt, 0, sizeof(dfut));
518 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
519 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
520 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
521 
522 	MVNETA_WRITE(sc, MVNETA_PIE,
523 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
524 
525 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
526 
527 	/* Setup phy. */
528 	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
529 	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
530 	panc = MVNETA_READ(sc, MVNETA_PANC);
531 
532 	/* Force link down to change in-band settings. */
533 	panc &= ~MVNETA_PANC_FORCELINKPASS;
534 	panc |= MVNETA_PANC_FORCELINKFAIL;
535 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
536 
537 	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
538 	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
539 	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
540 	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
541 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
542 	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
543 	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);
544 
545 	ctl2 |= MVNETA_PMACC2_RGMIIEN;
546 	switch (sc->sc_phy_mode) {
547 	case PHY_MODE_QSGMII:
548 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
549 		    MVNETA_SERDESCFG_QSGMII_PROTO);
550 		ctl2 |= MVNETA_PMACC2_PCSEN;
551 		break;
552 	case PHY_MODE_SGMII:
553 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
554 		    MVNETA_SERDESCFG_SGMII_PROTO);
555 		ctl2 |= MVNETA_PMACC2_PCSEN;
556 		break;
557 	default:
558 		break;
559 	}
560 
561 	/* Use Auto-Negotiation for Inband Status only */
562 	if (sc->sc_inband_status) {
563 		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
564 		    MVNETA_PANC_FORCELINKPASS);
565 		/* TODO: read mode from SFP */
566 		if (1) {
567 			/* 802.3z */
568 			ctl0 |= MVNETA_PMACC0_PORTTYPE;
569 			panc |= (MVNETA_PANC_INBANDANEN |
570 			    MVNETA_PANC_SETGMIISPEED |
571 			    MVNETA_PANC_SETFULLDX);
572 		} else {
573 			/* SGMII */
574 			ctl2 |= MVNETA_PMACC2_INBANDAN;
575 			panc |= (MVNETA_PANC_INBANDANEN |
576 			    MVNETA_PANC_ANSPEEDEN |
577 			    MVNETA_PANC_ANDUPLEXEN);
578 		}
579 		MVNETA_WRITE(sc, MVNETA_OMSCD,
580 		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
581 	} else {
582 		MVNETA_WRITE(sc, MVNETA_OMSCD,
583 		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
584 	}
585 
586 	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
587 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
588 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
589 
590 	/* Port reset */
591 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
592 		;
593 
594 	fdt_intr_establish(faa->fa_node, IPL_NET, mvneta_intr, sc,
595 	    sc->sc_dev.dv_xname);
596 
597 	ifp = &sc->sc_ac.ac_if;
598 	ifp->if_softc = sc;
599 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
600 	ifp->if_start = mvneta_start;
601 	ifp->if_ioctl = mvneta_ioctl;
602 	ifp->if_watchdog = mvneta_watchdog;
603 	ifp->if_capabilities = IFCAP_VLAN_MTU;
604 
605 #if notyet
606 	/*
607 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
608 	 */
609 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
610 				IFCAP_CSUM_UDPv4;
611 
612 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
613 	/*
614 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
615 	 */
616 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
617 #endif
618 
619 	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
620 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
621 
622 	/*
623 	 * Do MII setup.
624 	 */
625 	sc->sc_mii.mii_ifp = ifp;
626 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
627 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
628 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
629 
630 	ifmedia_init(&sc->sc_mii.mii_media, 0,
631 	    mvneta_mediachange, mvneta_mediastatus);
632 
633 	if (!sc->sc_fixed_link) {
634 		extern void *mvmdio_sc;
635 		sc->sc_mdio = mvmdio_sc;
636 
637 		if (sc->sc_mdio == NULL) {
638 			config_defer(self, mvneta_attach_deferred);
639 			return;
640 		}
641 
642 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phy,
643 		    MII_OFFSET_ANY, 0);
644 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
645 			printf("%s: no PHY found!\n", self->dv_xname);
646 			ifmedia_add(&sc->sc_mii.mii_media,
647 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
648 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
649 		} else
650 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
651 	} else {
652 		ifmedia_add(&sc->sc_mii.mii_media,
653 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
654 		ifmedia_set(&sc->sc_mii.mii_media,
655 		    IFM_ETHER|IFM_MANUAL);
656 
657 		if (sc->sc_inband_status) {
658 			mvneta_inband_statchg(sc);
659 		} else {
660 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
661 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
662 			mvneta_miibus_statchg(self);
663 		}
664 
665 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
666 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
667 	}
668 
669 	/*
670 	 * Call MI attach routines.
671 	 */
672 	if_attach(ifp);
673 	ether_ifattach(ifp);
674 
675 	return;
676 }
677 
678 void
679 mvneta_attach_deferred(struct device *self)
680 {
681 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
682 	struct ifnet *ifp = &sc->sc_ac.ac_if;
683 
684 	extern void *mvmdio_sc;
685 	sc->sc_mdio = mvmdio_sc;
686 	if (sc->sc_mdio == NULL) {
687 		printf("%s: mdio bus not yet attached\n", self->dv_xname);
688 		return;
689 	}
690 
691 	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phy,
692 	    MII_OFFSET_ANY, 0);
693 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
694 		printf("%s: no PHY found!\n", self->dv_xname);
695 		ifmedia_add(&sc->sc_mii.mii_media,
696 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
697 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
698 	} else
699 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
700 
701 	/*
702 	 * Call MI attach routines.
703 	 */
704 	if_attach(ifp);
705 	ether_ifattach(ifp);
706 
707 	return;
708 }
709 
710 void
711 mvneta_tick(void *arg)
712 {
713 	struct mvneta_softc *sc = arg;
714 	struct mii_data *mii = &sc->sc_mii;
715 	int s;
716 
717 	s = splnet();
718 	mii_tick(mii);
719 	splx(s);
720 
721 	timeout_add_sec(&sc->sc_tick_ch, 1);
722 }
723 
724 int
725 mvneta_intr(void *arg)
726 {
727 	struct mvneta_softc *sc = arg;
728 	struct ifnet *ifp = &sc->sc_ac.ac_if;
729 	uint32_t ic, misc;
730 
731 	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
732 
733 	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
734 		misc = MVNETA_READ(sc, MVNETA_PMIC);
735 		MVNETA_WRITE(sc, MVNETA_PMIC, 0);
736 		if (sc->sc_inband_status && (misc &
737 		    (MVNETA_PMI_PHYSTATUSCHNG |
738 		    MVNETA_PMI_LINKCHANGE |
739 		    MVNETA_PMI_PSCSYNCCHNG))) {
740 			mvneta_inband_statchg(sc);
741 		}
742 	}
743 
744 	if (!(ifp->if_flags & IFF_RUNNING))
745 		return 1;
746 
747 	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
748 		mvneta_tx_proc(sc);
749 
750 	if (ic & MVNETA_PRXTXTI_RBICTAPQ(0))
751 		mvneta_rx_proc(sc);
752 
753 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
754 		mvneta_start(ifp);
755 
756 	return 1;
757 }
758 
759 void
760 mvneta_start(struct ifnet *ifp)
761 {
762 	struct mvneta_softc *sc = ifp->if_softc;
763 	struct mbuf *m_head = NULL;
764 	int idx;
765 
766 	DPRINTFN(3, ("mvneta_start (idx %d)\n", sc->sc_tx_prod));
767 
768 	if (!(ifp->if_flags & IFF_RUNNING))
769 		return;
770 	if (ifq_is_oactive(&ifp->if_snd))
771 		return;
772 	if (IFQ_IS_EMPTY(&ifp->if_snd))
773 		return;
774 
775 	/* If Link is DOWN, can't start TX */
776 	if (!MVNETA_IS_LINKUP(sc))
777 		return;
778 
779 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
780 	    MVNETA_DMA_LEN(sc->sc_txring),
781 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
782 
783 	idx = sc->sc_tx_prod;
784 	while (sc->sc_tx_cnt < MVNETA_TX_RING_CNT) {
785 		m_head = ifq_deq_begin(&ifp->if_snd);
786 		if (m_head == NULL)
787 			break;
788 
789 		/*
790 		 * Pack the data into the transmit ring. If we
791 		 * don't have room, set the OACTIVE flag and wait
792 		 * for the NIC to drain the ring.
793 		 */
794 		if (mvneta_encap(sc, m_head, &idx)) {
795 			ifq_deq_rollback(&ifp->if_snd, m_head);
796 			ifq_set_oactive(&ifp->if_snd);
797 			break;
798 		}
799 
800 		/* now we are committed to transmit the packet */
801 		ifq_deq_commit(&ifp->if_snd, m_head);
802 
803 		/*
804 		 * If there's a BPF listener, bounce a copy of this frame
805 		 * to him.
806 		 */
807 #if NBPFILTER > 0
808 		if (ifp->if_bpf)
809 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
810 #endif
811 	}
812 
813 	if (sc->sc_tx_prod != idx) {
814 		sc->sc_tx_prod = idx;
815 
816 		/*
817 		 * Set a timeout in case the chip goes out to lunch.
818 		 */
819 		ifp->if_timer = 5;
820 	}
821 }
822 
823 int
824 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
825 {
826 	struct mvneta_softc *sc = ifp->if_softc;
827 	struct ifreq *ifr = (struct ifreq *)addr;
828 	int s, error = 0;
829 
830 	s = splnet();
831 
832 	switch (cmd) {
833 	case SIOCSIFADDR:
834 		ifp->if_flags |= IFF_UP;
835 		/* FALLTHROUGH */
836 	case SIOCSIFFLAGS:
837 		if (ifp->if_flags & IFF_UP) {
838 			if (ifp->if_flags & IFF_RUNNING)
839 				error = ENETRESET;
840 			else
841 				mvneta_up(sc);
842 		} else {
843 			if (ifp->if_flags & IFF_RUNNING)
844 				mvneta_down(sc);
845 		}
846 		break;
847 	case SIOCGIFMEDIA:
848 	case SIOCSIFMEDIA:
849 		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
850 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
851 		break;
852 	case SIOCGIFRXR:
853 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
854 		    NULL, MCLBYTES, &sc->sc_rx_ring);
855 		break;
856 	case SIOCGIFSFFPAGE:
857 		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
858 		if (error != 0)
859 			break;
860 
861 		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
862 		rw_exit(&mvneta_sff_lock);
863 		break;
864 	default:
865 		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
866 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
867 		break;
868 	}
869 
870 	if (error == ENETRESET) {
871 		if (ifp->if_flags & IFF_RUNNING)
872 			mvneta_iff(sc);
873 		error = 0;
874 	}
875 
876 	splx(s);
877 
878 	return error;
879 }
880 
881 void
882 mvneta_port_change(struct mvneta_softc *sc)
883 {
884 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
885 		sc->sc_link = !sc->sc_link;
886 
887 		if (sc->sc_link) {
888 			if (!sc->sc_inband_status) {
889 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
890 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
891 				panc |= MVNETA_PANC_FORCELINKPASS;
892 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
893 			}
894 			mvneta_port_up(sc);
895 		} else {
896 			if (!sc->sc_inband_status) {
897 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
898 				panc &= ~MVNETA_PANC_FORCELINKPASS;
899 				panc |= MVNETA_PANC_FORCELINKFAIL;
900 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
901 			}
902 		}
903 	}
904 }
905 
/*
 * Enable Rx and Tx on queue 0 (the only queue this driver uses).
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
913 
/*
 * Bring the interface up: allocate and program the Tx/Rx descriptor
 * rings, set frame-size/MTU limits, enable the port, program the MAC
 * address and RX filter, and unmask interrupts.  Allocations use
 * M_WAITOK/BUS_DMA_WAITOK; the function always returns 0.
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One multi-segment DMA map per Tx slot; no mbuf attached yet. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* Single-segment (one cluster) DMA map per Rx slot. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	/* Pre-load the Rx ring with mbuf clusters. */
	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1015 
/*
 * mvneta_down: stop the interface and tear down its DMA resources.
 *
 * Disables the Rx/Tx queues and spins (bounded iteration counts, not
 * time-based) until the hardware reports Rx stopped and the Tx FIFO
 * empty, then disables the MAC port, masks/acks interrupts, frees all
 * mbufs and DMA maps still held by the rings, releases the descriptor
 * ring memory and resets the Rx/Tx DMA engines.  Clears IFF_RUNNING.
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	/* Port-status bits for queue 0: Tx in progress / Tx FIFO empty. */
	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	/* Let any in-flight activity settle before disabling the port. */
	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
1140 
1141 void
1142 mvneta_watchdog(struct ifnet *ifp)
1143 {
1144 	struct mvneta_softc *sc = ifp->if_softc;
1145 
1146 	/*
1147 	 * Reclaim first as there is a possibility of losing Tx completion
1148 	 * interrupts.
1149 	 */
1150 	mvneta_tx_proc(sc);
1151 	if (sc->sc_tx_cnt != 0) {
1152 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1153 
1154 		ifp->if_oerrors++;
1155 	}
1156 }
1157 
1158 /*
1159  * Set media options.
1160  */
1161 int
1162 mvneta_mediachange(struct ifnet *ifp)
1163 {
1164 	struct mvneta_softc *sc = ifp->if_softc;
1165 
1166 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1167 		mii_mediachg(&sc->sc_mii);
1168 
1169 	return (0);
1170 }
1171 
1172 /*
1173  * Report current media status.
1174  */
1175 void
1176 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1177 {
1178 	struct mvneta_softc *sc = ifp->if_softc;
1179 
1180 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1181 		mii_pollstat(&sc->sc_mii);
1182 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1183 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1184 	}
1185 
1186 	if (sc->sc_fixed_link) {
1187 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1188 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1189 	}
1190 }
1191 
/*
 * mvneta_encap: DMA-load mbuf 'm' and build its Tx descriptor chain.
 *
 * '*idx' is the producer slot to start at; on success it is advanced
 * past the last descriptor used, sc_tx_cnt grows by the number of DMA
 * segments consumed and the hardware is notified via MVNETA_PTXSU.
 * Returns 0 on success, or ENOBUFS when the mbuf cannot be loaded or
 * the ring lacks room (the map is unloaded and the caller keeps
 * ownership of the mbuf in that case).
 */
int
mvneta_encap(struct mvneta_softc *sc, struct mbuf *m, uint32_t *idx)
{
	struct mvneta_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t cmdsts;
	int i, current, first, last;

	DPRINTFN(3, ("mvneta_encap\n"));

	first = last = current = *idx;
	map = sc->sc_txbuf[current].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/* Refuse if the chain would (nearly) fill the ring; keeps 2 slack
	 * slots so producer never catches consumer. */
	if (map->dm_nsegs > (MVNETA_TX_RING_CNT - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	DPRINTFN(2, ("mvneta_encap: dm_nsegs=%d\n", map->dm_nsegs));

	/* No hardware checksum offload yet; see the notyet block below. */
	cmdsts = MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	/* One descriptor per DMA segment; flag the first and last. */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[current];
		memset(txd, 0, sizeof(*txd));
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->cmdsts = cmdsts |
		    MVNETA_TX_ZERO_PADDING;
		if (i == 0)
		    txd->cmdsts |= MVNETA_TX_FIRST_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->cmdsts |= MVNETA_TX_LAST_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = MVNETA_TX_RING_NEXT(current);
		KASSERT(current != sc->sc_tx_cons);
	}

	/*
	 * The loaded map and the mbuf are stored on the *last* slot so
	 * tx_proc releases them only once the whole chain completed;
	 * the last slot's spare map is parked on the first slot.
	 */
	KASSERT(sc->sc_txbuf[last].tb_m == NULL);
	sc->sc_txbuf[first].tb_map = sc->sc_txbuf[last].tb_map;
	sc->sc_txbuf[last].tb_map = map;
	sc->sc_txbuf[last].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = current;

	/* Let him know we sent another packet. */
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), map->dm_nsegs);

	DPRINTFN(3, ("mvneta_encap: completed successfully\n"));

	return 0;
}
1273 
1274 void
1275 mvneta_rx_proc(struct mvneta_softc *sc)
1276 {
1277 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1278 	struct mvneta_rx_desc *rxd;
1279 	struct mvneta_buf *rxb;
1280 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1281 	struct mbuf *m;
1282 	uint32_t rxstat;
1283 	int i, idx, len, ready;
1284 
1285 	DPRINTFN(3, ("%s: %d\n", __func__, sc->sc_rx_cons));
1286 
1287 	if (!(ifp->if_flags & IFF_RUNNING))
1288 		return;
1289 
1290 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring), 0,
1291 	    MVNETA_DMA_LEN(sc->sc_rxring),
1292 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1293 
1294 	ready = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
1295 	MVNETA_WRITE(sc, MVNETA_PRXSU(0), ready);
1296 
1297 	for (i = 0; i < ready; i++) {
1298 		idx = sc->sc_rx_cons;
1299 		KASSERT(idx < MVNETA_RX_RING_CNT);
1300 
1301 		rxd = &sc->sc_rxdesc[idx];
1302 
1303 #ifdef DIAGNOSTIC
1304 		if ((rxd->cmdsts &
1305 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC)) !=
1306 		    (MVNETA_RX_LAST_DESC | MVNETA_RX_FIRST_DESC))
1307 			panic("%s: buffer size is smaller than packet",
1308 			    __func__);
1309 #endif
1310 
1311 		len = rxd->bytecnt;
1312 		rxb = &sc->sc_rxbuf[idx];
1313 		KASSERT(rxb->tb_m);
1314 
1315 		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
1316 		    len, BUS_DMASYNC_POSTREAD);
1317 		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
1318 
1319 		m = rxb->tb_m;
1320 		rxb->tb_m = NULL;
1321 		m->m_pkthdr.len = m->m_len = len;
1322 
1323 		rxstat = rxd->cmdsts;
1324 		if (rxstat & MVNETA_ERROR_SUMMARY) {
1325 #if 0
1326 			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;
1327 
1328 			if (err == MVNETA_RX_CRC_ERROR)
1329 				ifp->if_ierrors++;
1330 			if (err == MVNETA_RX_OVERRUN_ERROR)
1331 				ifp->if_ierrors++;
1332 			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
1333 				ifp->if_ierrors++;
1334 			if (err == MVNETA_RX_RESOURCE_ERROR)
1335 				ifp->if_ierrors++;
1336 #else
1337 			ifp->if_ierrors++;
1338 #endif
1339 			panic("%s: handle input errors", __func__);
1340 			continue;
1341 		}
1342 
1343 #if notyet
1344 		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
1345 			int flgs = 0;
1346 
1347 			/* Check IPv4 header checksum */
1348 			flgs |= M_CSUM_IPv4;
1349 			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
1350 				flgs |= M_CSUM_IPv4_BAD;
1351 			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
1352 				/*
1353 				 * Check TCPv4/UDPv4 checksum for
1354 				 * non-fragmented packet only.
1355 				 *
1356 				 * It seemd that sometimes
1357 				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
1358 				 * even if the checksum is correct and the
1359 				 * packet was not fragmented. So we don't set
1360 				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
1361 				 */
1362 
1363 				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1364 					MVNETA_RX_L4_TYPE_TCP) &&
1365 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1366 					flgs |= M_CSUM_TCPv4;
1367 				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
1368 					MVNETA_RX_L4_TYPE_UDP) &&
1369 				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
1370 					flgs |= M_CSUM_UDPv4;
1371 			}
1372 			m->m_pkthdr.csum_flags = flgs;
1373 		}
1374 #endif
1375 
1376 		/* Skip on first 2byte (HW header) */
1377 		m_adj(m, MVNETA_HWHEADER_SIZE);
1378 
1379 		ml_enqueue(&ml, m);
1380 
1381 		if_rxr_put(&sc->sc_rx_ring, 1);
1382 
1383 		sc->sc_rx_cons = MVNETA_RX_RING_NEXT(idx);
1384 	}
1385 
1386 	mvneta_fill_rx_ring(sc);
1387 
1388 	if_input(ifp, &ml);
1389 }
1390 
/*
 * mvneta_tx_proc: reclaim completed Tx descriptors.
 *
 * Reads the transmitted-buffer count from the port Tx status register,
 * acknowledges it, then walks the ring from sc_tx_cons unloading DMA
 * maps, freeing mbufs and accounting errors reported in each
 * descriptor's cmdsts.  Clears the watchdog timer once the ring is
 * fully drained.
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	int i, idx, sent;

	DPRINTFN(3, ("%s\n", __func__));

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	/* How many buffers the hardware finished; acknowledge them. */
	sent = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NORB(sent));

	for (i = 0; i < sent; i++) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < MVNETA_TX_RING_CNT);

		txd = &sc->sc_txdesc[idx];
		txb = &sc->sc_txbuf[idx];
		/* Only the last slot of a chain carries the mbuf/map. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		/* Ring space was just freed; allow output again. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		sc->sc_tx_cons = MVNETA_TX_RING_NEXT(sc->sc_tx_cons);
	}

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1447 
/*
 * mvneta_crc8: bitwise CRC-8, MSB first, polynomial 0x07
 * (x^8 + x^2 + x + 1), zero initial value.  Used by mvneta_iff() to
 * hash multicast addresses into the "other" multicast filter table.
 */
uint8_t
mvneta_crc8(const uint8_t *data, size_t size)
{
	const uint8_t poly = 0x07;
	uint8_t crc = 0;
	uint8_t byte, fb;
	size_t n;
	int bit;

	for (n = 0; n < size; n++) {
		byte = data[n];
		for (bit = 7; bit >= 0; bit--) {
			/* Feedback bit: CRC MSB xor next data bit. */
			fb = ((crc >> 7) ^ (byte >> bit)) & 1;
			crc <<= 1;
			if (fb)
				crc ^= poly;
		}
	}

	return crc;
}
1462 
1463 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1464 
/*
 * mvneta_iff: program promiscuous mode and address filters.
 *
 * Rebuilds the unicast (DFUT), special multicast (DFSMT) and other
 * multicast (DFOMT) filter tables from the interface state.
 * IFF_PROMISC or a multicast address range forces ALLMULTI (every
 * multicast table entry passes); otherwise 01:00:5e:00:00:xx addresses
 * index DFSMT directly by their last byte while all other multicast
 * addresses are hashed into DFOMT with mvneta_crc8().
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	/* Start from a clean slate: no broadcast reject, no promisc. */
	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		/* Pass everything: all four entries of every table word. */
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1524 
/*
 * mvneta_dmamem_alloc: allocate 'size' bytes of zeroed, single-segment
 * DMA memory, mapped into kernel VA and loaded into a DMA map.
 *
 * Returns the bookkeeping structure, or NULL on failure; partially
 * acquired resources are released through the goto-cleanup chain in
 * reverse order of acquisition.  Pair with mvneta_dmamem_free().
 */
struct mvneta_dmamem *
mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvneta_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
1565 
/*
 * mvneta_dmamem_free: release memory obtained from mvneta_dmamem_alloc
 * (unmap the KVA, free the segment, destroy the map, then free the
 * bookkeeping structure — strict reverse order of acquisition).
 */
void
mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
1574 
1575 struct mbuf *
1576 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1577 {
1578 	struct mbuf *m = NULL;
1579 
1580 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1581 	if (!m)
1582 		return (NULL);
1583 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1584 
1585 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1586 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1587 		m_freem(m);
1588 		return (NULL);
1589 	}
1590 
1591 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1592 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1593 
1594 	return (m);
1595 }
1596 
/*
 * mvneta_fill_rx_ring: replenish the Rx ring with fresh mbuf clusters.
 *
 * Takes as many slots as if_rxr_get() grants, attaches a newly
 * allocated and DMA-loaded mbuf to each producer slot, points the
 * descriptor at it and tells the hardware one more descriptor is
 * available.  Slots left unfilled (mbuf shortage) are handed back to
 * the rxr accounting at the end.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_RX_RING_CNT);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		memset(rxd, 0, sizeof(*rxd));
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;

		bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
		    sc->sc_rx_prod * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_PREWRITE);

		sc->sc_rx_prod = MVNETA_RX_RING_NEXT(sc->sc_rx_prod);

		/* Tell him that there's a new free desc. */
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOOFNEWDESCRIPTORS(1));
	}

	if_rxr_put(&sc->sc_rx_ring, slots);
}
1628