/*	$NetBSD: if_vte.c,v 1.38 2024/09/05 17:54:02 andvar Exp $	*/

/*
 * Copyright (c) 2011 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* FreeBSD: src/sys/dev/vte/if_vte.c,v 1.2 2010/12/31 01:23:04 yongari Exp */
/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vte.c,v 1.38 2024/09/05 17:54:02 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/bpf.h>

#include <sys/rndsource.h>

#include "opt_inet.h"
#include <net/if_ether.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/if_vtereg.h>
#include <dev/pci/if_vtevar.h>

static int	vte_match(device_t, cfdata_t, void *);
static void	vte_attach(device_t, device_t, void *);
static int	vte_detach(device_t, int);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static void	vte_get_macaddr(struct vte_softc *);
static int	vte_init(struct ifnet *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static int	vte_intr(void *);
static int	vte_ifioctl(struct ifnet *, u_long, void *);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int, uint16_t *);
static void	vte_miibus_statchg(struct ifnet *);
static int	vte_miibus_writereg(device_t, int, int, uint16_t);
static int	vte_mediachange(struct ifnet *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static void	vte_reset(struct vte_softc *);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static bool	vte_shutdown(device_t, int);
static bool	vte_suspend(device_t, const pmf_qual_t *);
static bool	vte_resume(device_t, const pmf_qual_t *);
static void	vte_ifstart(struct ifnet *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct ifnet *, int);
static void	vte_stop_mac(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_ifwatchdog(struct ifnet *);

static int vte_sysctl_intrxct(SYSCTLFN_PROTO);
static int vte_sysctl_inttxct(SYSCTLFN_PROTO);
static int vte_root_num;

#define DPRINTF(a)

CFATTACH_DECL3_NEW(vte, sizeof(struct vte_softc),
    vte_match, vte_attach, vte_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);


static int
vte_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RDC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_RDC_R6040)
		return 1;

	return 0;
}

static void
vte_attach(device_t parent, device_t self, void *aux)
{
	struct vte_softc *sc = device_private(self);
	struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
	struct ifnet * const ifp = &sc->vte_if;
	struct mii_data * const mii = &sc->vte_mii;
	int h_valid;
	pcireg_t reg, csr;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int error;
	const struct sysctlnode *node;
	int vte_nodenum;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->vte_dev = self;

	callout_init(&sc->vte_tick_ch, 0);
	callout_setfunc(&sc->vte_tick_ch, vte_tick, sc);

	/* Map the device. */
	h_valid = 0;
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BMEM);
	if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_MEM) {
		h_valid = (pci_mapreg_map(pa, VTE_PCI_BMEM,
		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &sc->vte_bustag, &sc->vte_bushandle, NULL, NULL) == 0);
	}
	if (h_valid == 0) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BIO);
		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			h_valid = (pci_mapreg_map(pa, VTE_PCI_BIO,
			    PCI_MAPREG_TYPE_IO, 0, &sc->vte_bustag,
			    &sc->vte_bushandle, NULL, NULL) == 0);
		}
	}
	if (h_valid == 0) {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}
	sc->vte_dmatag = pa->pa_dmat;
	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	pci_aprint_devinfo(pa, NULL);

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		return;

	/* Load station address. */
	vte_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->vte_eaddr));

	/* Map and establish interrupts */
	if (pci_intr_map(pa, &intrhandle)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf,
	    sizeof(intrbuf));
	sc->vte_ih = pci_intr_establish_xname(pa->pa_pc, intrhandle, IPL_NET,
	    vte_intr, sc, device_xname(self));
	if (sc->vte_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->vte_if.if_softc = sc;
	mii->mii_ifp = ifp;
	mii->mii_readreg = vte_miibus_readreg;
	mii->mii_writereg = vte_miibus_writereg;
	mii->mii_statchg = vte_miibus_statchg;
	sc->vte_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, vte_mediachange,
	    ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->vte_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ifioctl;
	ifp->if_start = vte_ifstart;
	ifp->if_watchdog = vte_ifwatchdog;
	ifp->if_init = vte_init;
	ifp->if_stop = vte_stop;
	ifp->if_timer = 0;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->vte_eaddr);

	if (pmf_device_register1(self, vte_suspend, vte_resume, vte_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->vte_dev),
	    SYSCTL_DESCR("vte per-controller controls"),
	    NULL, 0, NULL, 0, CTL_HW, vte_root_num, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev, "couldn't create sysctl node\n");
		return;
	}
	vte_nodenum = node->sysctl_num;
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_rxct",
	    SYSCTL_DESCR("vte RX interrupt moderation packet counter"),
	    vte_sysctl_intrxct, 0, (void *)sc,
	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev,
		    "couldn't create int_rxct sysctl node\n");
	}
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_txct",
	    SYSCTL_DESCR("vte TX interrupt moderation packet counter"),
	    vte_sysctl_inttxct, 0, (void *)sc,
	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev,
		    "couldn't create int_txct sysctl node\n");
	}
}

static int
vte_detach(device_t dev, int flags __unused)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->vte_if;
	int s;

	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	vte_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);

	mii_detach(&sc->vte_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->vte_mii.mii_media);

	vte_dma_free(sc);

	return (0);
}

static int
vte_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct vte_softc *sc = device_private(dev);
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		aprint_error_dev(sc->vte_dev, "phy read timeout : %d\n", reg);
		return ETIMEDOUT;
	}

	*val = CSR_READ_2(sc, VTE_MMRD);
	return 0;
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct vte_softc *sc = device_private(dev);
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0) {
		aprint_error_dev(sc->vte_dev, "phy write timeout : %d\n", reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
vte_miibus_statchg(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	uint16_t val;

	DPRINTF(("vte_miibus_statchg 0x%x 0x%x\n",
	    sc->vte_mii.mii_media_status, sc->vte_mii.mii_media_active));

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((sc->vte_mii.mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(sc->vte_mii.mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
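		/*
		 * Worked example (editor's sketch, from the formula
		 * above): at 100Mbps, TIMER=18 gives 63 + 18 * 64 =
		 * 1215 MII clocks at 25MHz, i.e. ~48.6us; at 10Mbps,
		 * TIMER=1 gives 63 + 64 = 127 clocks at 2.5MHz, i.e.
		 * ~50.8us.  This matches the per-speed comments on
		 * the register writes below.
		 */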
		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
		DPRINTF(("vte_miibus_statchg: link\n"));
	}
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}


static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, rseg;

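	/*
	 * Each ring below goes through the usual four-step bus_dma(9)
	 * sequence: bus_dmamap_create, bus_dmamem_alloc, bus_dmamem_map
	 * and bus_dmamap_load.  On any failure vte_dma_free() unwinds
	 * whatever subset succeeded.
	 */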
	/* create DMA map for TX ring */
	error = bus_dmamap_create(sc->vte_dmatag, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error) {
		aprint_error_dev(sc->vte_dev,
		    "could not create dma map for TX ring (%d)\n",
		    error);
		goto fail;
	}
	/* Allocate and map DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_TX_RING_SZ,
	    VTE_TX_RING_ALIGN, 0,
	    sc->vte_cdata.vte_tx_ring_seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}
	KASSERT(rseg == 1);
	error = bus_dmamem_map(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_seg, 1,
	    VTE_TX_RING_SZ, (void **)(&sc->vte_cdata.vte_tx_ring),
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not map DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}
	memset(sc->vte_cdata.vte_tx_ring, 0, VTE_TX_RING_SZ);
	error = bus_dmamap_load(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}

	/* create DMA map for RX ring */
	error = bus_dmamap_create(sc->vte_dmatag, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error) {
		aprint_error_dev(sc->vte_dev,
		    "could not create dma map for RX ring (%d)\n",
		    error);
		goto fail;
	}
	/* Allocate and map DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_RX_RING_SZ,
	    VTE_RX_RING_ALIGN, 0,
	    sc->vte_cdata.vte_rx_ring_seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}
	KASSERT(rseg == 1);
	error = bus_dmamem_map(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_seg, 1,
	    VTE_RX_RING_SZ, (void **)(&sc->vte_cdata.vte_rx_ring),
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not map DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}
	memset(sc->vte_cdata.vte_rx_ring, 0, VTE_RX_RING_SZ);
	error = bus_dmamap_load(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not load DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &txd->tx_dmamap);
		if (error != 0) {
			aprint_error_dev(sc->vte_dev,
			    "could not create TX DMA map %d (%d).\n", i, error);
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
	    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not create spare RX dmamap (%d).\n", error);
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &rxd->rx_dmamap);
		if (error != 0) {
			aprint_error_dev(sc->vte_dev,
			    "could not create RX dmamap %d (%d).\n", i, error);
			goto fail;
		}
	}
	return 0;

fail:
	vte_dma_free(sc);
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->vte_dmatag, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->vte_dmatag, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL) {
		bus_dmamap_unload(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map);
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map);
	}
	if (sc->vte_cdata.vte_tx_ring != NULL) {
		bus_dmamem_unmap(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ);
		bus_dmamem_free(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_seg, 1);
	}
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL) {
		bus_dmamap_unload(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map);
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map);
	}
	if (sc->vte_cdata.vte_rx_ring != NULL) {
		bus_dmamem_unmap(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ);
		bus_dmamem_free(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_seg, 1);
	}
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

static bool
vte_shutdown(device_t dev, int howto)
{

	return (vte_suspend(dev, NULL));
}

static bool
vte_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->vte_if;

	DPRINTF(("vte_suspend if_flags 0x%x\n", ifp->if_flags));
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		vte_stop(ifp, 1);
	return true;
}

static bool
vte_resume(device_t dev, const pmf_qual_t *qual)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp;

	ifp = &sc->vte_if;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_flags &= ~IFF_RUNNING;
		vte_init(ifp);
	}

	return true;
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * Controller doesn't auto-pad, so we have to pad short frames
	 * out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time in de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller
	 * DMA engine would be required to speed up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf cluster
	 * at the extra cost of kernel memory.  For frames composed of
	 * a single TX buffer, the deep copy is bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (M_READONLY(m) ||
	    padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->vte_dmatag, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(txd->tx_dmamap->dm_nsegs == 1);
	bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_ifstart(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head, *m;
	int enq;

	ifp = &sc->vte_if;

	DPRINTF(("vte_ifstart 0x%x 0x%x\n", ifp->if_flags, sc->vte_flags));

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		return;
	}
	if ((sc->vte_flags & VTE_FLAG_LINK) == 0) {
		return;
	}

	/* Reserve one free TX descriptor. */
	for (enq = 0; sc->vte_cdata.vte_tx_cnt < VTE_TX_RING_CNT - 1; ) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.
		 */
		DPRINTF(("vte_encap:"));
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			DPRINTF((" failed\n"));
			break;
		}
		DPRINTF((" ok\n"));
		IFQ_DEQUEUE(&ifp->if_snd, m);
		KASSERT(m == m_head);

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_ifwatchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

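	/*
	 * Driven once a second from vte_tick() (and registered as
	 * if_watchdog); only act when the timer is armed and this
	 * tick counts it down from 1 to 0.
	 */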
	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	aprint_error_dev(sc->vte_dev, "watchdog timeout -- resetting\n");
	if_statinc(ifp, if_oerrors);
	vte_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_ifstart(ifp);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	int error;
	struct vte_softc *sc = ifp->if_softc;

	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
		error = 0;
	else if (error != 0) {
		aprint_error_dev(sc->vte_dev, "could not set media\n");
		return error;
	}
	return 0;
}

static int
vte_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct vte_softc *sc = ifp->if_softc;
	int error, s;

	s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		DPRINTF(("vte_ifioctl if_flags 0x%x\n", ifp->if_flags));
		if (ifp->if_flags & IFF_RUNNING)
			vte_rxfilter(sc);
		error = 0;
	}
	splx(s);
	return error;
}

static void
vte_mac_config(struct vte_softc *sc)
{
	uint16_t mcr;

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp = &sc->vte_if;
	uint16_t value;

	stat = &sc->vte_stats;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	CSR_READ_2(sc, VTE_MECISR);

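	/*
	 * Each MECNTn register packs two 8-bit event counters; the
	 * high/low byte masks below split them out.
	 */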
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);

	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	if_statadd_ref(ifp, nsr, if_ierrors,
	    (value >> 8) +			/* rx_runts */
	    (value & 0xFF));			/* rx_crcerrs */

	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	if_statadd_ref(ifp, nsr, if_ierrors,
	    (value & 0xFF));			/* rx_long_frames */

	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	if_statadd_ref(ifp, nsr, if_ierrors,
	    (value >> 8));			/* rx_fifo_full */
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	if_statadd_ref(ifp, nsr, if_opackets,
	    CSR_READ_2(sc, VTE_CNT_TX_DONE));	/* tx_frames */

	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	if_statadd_ref(ifp, nsr, if_oerrors,
	    (value >> 8) +			/* tx_underruns */
	    (value & 0xFF));			/* tx_late_colls */

	/* Pause stats. */
	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	IF_STAT_PUTREF(ifp);
}

static int
vte_intr(void *arg)
{
	struct vte_softc *sc = (struct vte_softc *)arg;
	struct ifnet *ifp = &sc->vte_if;
	uint16_t status;
	int n;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	DPRINTF(("vte_intr status 0x%x\n", status));
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
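	/*
	 * Bound the loop to eight passes so a continuous stream of
	 * events cannot keep us spinning in the handler forever.
	 */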
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if_schedule_deferred_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	return 1;
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	ifp = &sc->vte_if;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		if ((status & VTE_DTST_TX_OK) != 0)
			if_statadd(ifp, if_collisions, (status & 0xf));
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_dmatag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dmamap_t map;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
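	/*
	 * Editor's note (assumption): trimming sizeof(uint32_t) keeps
	 * the buffer length handed to the controller (MCLBYTES - 4)
	 * in line with the multiple-of-4 RX buffer size rule noted in
	 * vte_init(); the error paths in vte_rxeof() restore the same
	 * length.
	 */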
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(sc->vte_cdata.vte_rx_sparemap->dm_nsegs == 1);

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
		    0, rxd->rx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_dmatag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
	    0, rxd->rx_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp =
	    htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(
	    VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	DPRINTF(("rx data %p mbuf %p buf 0x%x/0x%x\n", rxd, m,
		(u_int)rxd->rx_dmamap->dm_segs[0].ds_addr,
		rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = &sc->vte_if;
	DPRINTF(("vte_rxeof if_flags 0x%x\n", ifp->if_flags));
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		DPRINTF(("vte_rxeof rxd %d/%p mbuf %p status 0x%x len %d\n",
			cons, rxd, rxd->rx_m, status,
			VTE_RX_LEN(le16toh(rxd->rx_desc->drlen))));
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			DPRINTF(("vte_rxeof newbuf failed\n"));
			if_statinc(ifp, if_ierrors);
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m_set_rcvif(m, ifp);
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so that the
		 * controller sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update residue counter.  The controller does not
		 * keep track of the number of available RX
		 * descriptors, so the driver has to update VTE_MRDCR
		 * to let the controller know how many free RX
		 * descriptors were added.  This is a mechanism
		 * similar to the one used in VIA velocity controllers
		 * and it indicates the controller just polls the OWN
		 * bit of the current RX descriptor pointer.  A couple
		 * of severe issues were seen on a sample board where
		 * the controller continuously emits TX pause frames
		 * once the RX pause threshold is crossed.  Once
		 * triggered it never recovered from that state; I
		 * couldn't find a way to bring it back to a working
		 * state.  This issue effectively disconnected the
		 * system from the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of
		 * the TX pause frames.  Probably this is one of the
		 * reasons why the vendor recommends not to enable
		 * flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
		rnd_add_uint32(&sc->rnd_source, prog);
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	int s = splnet();

	sc = (struct vte_softc *)arg;

	mii_tick(&sc->vte_mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_ifwatchdog(&sc->vte_if);
	callout_schedule(&sc->vte_tick_ch, hz);
	splx(s);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr, mdcsc;
	int i;

	mdcsc = CSR_READ_2(sc, VTE_MDCSC);
	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		aprint_error_dev(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);

	/*
	 * On some SoCs (like Vortex86DX3) MDC speed control register value
	 * needs to be restored to original value instead of default one,
	 * otherwise some PHY registers may fail to be read.
	 */
	if (mdcsc != MDCSC_DEFAULT)
		CSR_WRITE_2(sc, VTE_MDCSC, mdcsc);
}


static int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int s, error;

	s = splnet();
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(ifp, 1);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	if ((sc->vte_if.if_flags & IFF_UP) == 0) {
		splx(s);
		return 0;
	}

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		aprint_error_dev(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(ifp, 1);
		splx(s);
		return ENOMEM;
	}
	if (vte_init_tx_ring(sc) != 0) {
		aprint_error_dev(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(ifp, 1);
		splx(s);
		return ENOMEM;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;
	DPRINTF(("tx paddr 0x%x\n", (u_int)paddr));
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);

	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;
	DPRINTF(("rx paddr 0x%x\n", (u_int)paddr));
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames longer than the vte(4)
	 * MTU would be silently dropped in the controller, which
	 * would break path-MTU discovery as the sender wouldn't get
	 * any responses from the receiver.  The RX buffer size should
	 * be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  Actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note, we don't generate an early interrupt here
	 * either, since we don't have the interrupt latency problems
	 * Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);
	DPRINTF(("before ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
		CSR_READ_2(sc, VTE_MISR)));

	sc->vte_flags &= ~VTE_FLAG_LINK;
	ifp->if_flags |= IFF_RUNNING;

	/* calling mii_mediachg will call back vte_start_mac() */
	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
		error = 0;
	else if (error != 0) {
		aprint_error_dev(sc->vte_dev, "could not set media\n");
		splx(s);
		return error;
	}

	callout_schedule(&sc->vte_tick_ch, hz);

	DPRINTF(("ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
		CSR_READ_2(sc, VTE_MISR)));
	splx(s);
	return 0;
}

static void
vte_stop(struct ifnet *ifp, int disable)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	DPRINTF(("vte_stop if_flags 0x%x\n", ifp->if_flags));
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_dmatag,
			    rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_dmatag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_dmatag,
			    txd->tx_dmamap, 0, txd->tx_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_dmatag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		m_freem(sc->vte_cdata.vte_txmbufs[i]);
		sc->vte_cdata.vte_txmbufs[i] = NULL;
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->vte_if;
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB) &&
	    (ifp->if_flags & IFF_RUNNING) != 0) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			aprint_error_dev(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
	vte_rxfilter(sc);
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			aprint_error_dev(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
		    MT_DATA, M_PKTHDR);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
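	/*
	 * Link each descriptor to its successor through the dtnp
	 * field; the last one points back at the first, forming a
	 * ring.
	 */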
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		DPRINTF(("tx ring desc %d addr 0x%x\n", i, (u_int)addr));
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
			    + sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
			    + sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		DPRINTF(("rx ring desc %d addr 0x%x\n", i, (u_int)addr));
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ethercom *ec = &sc->vte_ec;
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	ifp = &sc->vte_if;

	DPRINTF(("vte_rxfilter\n"));
	memset(mchash, 0, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	DPRINTF(("vte_rxfilter mcr 0x%x\n", mcr));
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	if ((ifp->if_flags & IFF_BROADCAST) == 0)
		mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	nperf = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)
		    != 0) {
			sc->vte_if.if_flags |= IFF_ALLMULTI;
			mcr |= MCR0_MULTICAST;
			mchash[0] = 0xFFFF;
			mchash[1] = 0xFFFF;
			mchash[2] = 0xFFFF;
			mchash[3] = 0xFFFF;
			ETHER_UNLOCK(ec);
			goto chipit;
		}
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
		 */
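		/*
		 * Hash layout, as the masks below assume: the top two
		 * bits of the big-endian CRC select one of the four
		 * 16-bit MAR registers, and the next four bits select
		 * the bit within it, i.e. a 64-bit hash table split
		 * across MAR0..MAR3.
		 */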
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = enm->enm_addrlo;
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
		} else {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	DPRINTF(("chipit write multicast\n"));
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
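	/*
	 * Each perfect-filter entry occupies an 8-byte register slot
	 * holding the MAC address as three 16-bit words, as the
	 * 8 * i stride and word offsets below assume.
	 */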
	DPRINTF(("chipit write perfect filter\n"));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	DPRINTF(("chipit mcr0 0x%x\n", mcr));
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	DPRINTF(("chipit read mcr0\n"));
	CSR_READ_2(sc, VTE_MCR0);
	DPRINTF(("chipit done\n"));
}

/*
 * Set up sysctl(3) MIB, hw.vte.* - Individual controllers will be
 * set up in vte_attach()
 */
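/*
 * Usage sketch (assuming a single controller attached as vte0):
 *	sysctl -w hw.vte.vte0.int_rxct=8
 * tunes the RX interrupt moderation packet counter; the handlers
 * below reject values outside [VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX].
 */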
SYSCTL_SETUP(sysctl_vte, "sysctl vte subtree setup")
{
	int rc;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, "vte",
	    SYSCTL_DESCR("vte interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	vte_root_num = node->sysctl_num;
	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}

static int
vte_sysctl_intrxct(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct vte_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->vte_int_rx_mod;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
		return EINVAL;

	sc->vte_int_rx_mod = t;
	vte_miibus_statchg(&sc->vte_if);
	return 0;
}

static int
vte_sysctl_inttxct(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct vte_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->vte_int_tx_mod;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
		return EINVAL;
	sc->vte_int_tx_mod = t;
	vte_miibus_statchg(&sc->vte_if);
	return 0;
}