1 /*	$NetBSD: if_vte.c,v 1.20 2018/06/26 06:48:01 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2011 Manuel Bouyer.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*-
28  * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice unmodified, this list of conditions, and the following
36  *    disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  */
53 /* FreeBSD: src/sys/dev/vte/if_vte.c,v 1.2 2010/12/31 01:23:04 yongari Exp */
54 
55 /* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 Fast Ethernet. */
56 
57 #include <sys/cdefs.h>
58 __KERNEL_RCSID(0, "$NetBSD: if_vte.c,v 1.20 2018/06/26 06:48:01 msaitoh Exp $");
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/mbuf.h>
63 #include <sys/protosw.h>
64 #include <sys/socket.h>
65 #include <sys/ioctl.h>
66 #include <sys/errno.h>
67 #include <sys/malloc.h>
68 #include <sys/kernel.h>
69 #include <sys/device.h>
70 #include <sys/sysctl.h>
71 
72 #include <net/if.h>
73 #include <net/if_media.h>
74 #include <net/if_types.h>
75 #include <net/if_dl.h>
76 #include <net/route.h>
77 #include <net/netisr.h>
78 #include <net/bpf.h>
79 
80 #include <sys/rndsource.h>
81 
82 #include "opt_inet.h"
83 #include <net/if_ether.h>
84 #ifdef INET
85 #include <netinet/in.h>
86 #include <netinet/in_systm.h>
87 #include <netinet/in_var.h>
88 #include <netinet/ip.h>
89 #include <netinet/if_inarp.h>
90 #endif
91 
92 #include <sys/bus.h>
93 #include <sys/intr.h>
94 
95 #include <dev/pci/pcireg.h>
96 #include <dev/pci/pcivar.h>
97 #include <dev/pci/pcidevs.h>
98 
99 #include <dev/mii/mii.h>
100 #include <dev/mii/miivar.h>
101 
102 #include <dev/pci/if_vtereg.h>
103 #include <dev/pci/if_vtevar.h>
104 
105 static int	vte_match(device_t, cfdata_t, void *);
106 static void	vte_attach(device_t, device_t, void *);
107 static int	vte_detach(device_t, int);
108 static int	vte_dma_alloc(struct vte_softc *);
109 static void	vte_dma_free(struct vte_softc *);
110 static struct vte_txdesc *
111 		vte_encap(struct vte_softc *, struct mbuf **);
112 static void	vte_get_macaddr(struct vte_softc *);
113 static int	vte_init(struct ifnet *);
114 static int	vte_init_rx_ring(struct vte_softc *);
115 static int	vte_init_tx_ring(struct vte_softc *);
116 static int	vte_intr(void *);
117 static int	vte_ifioctl(struct ifnet *, u_long, void *);
118 static void	vte_mac_config(struct vte_softc *);
119 static int	vte_miibus_readreg(device_t, int, int);
120 static void	vte_miibus_statchg(struct ifnet *);
121 static void	vte_miibus_writereg(device_t, int, int, int);
122 static int	vte_mediachange(struct ifnet *);
123 static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
124 static void	vte_reset(struct vte_softc *);
125 static void	vte_rxeof(struct vte_softc *);
126 static void	vte_rxfilter(struct vte_softc *);
127 static bool	vte_shutdown(device_t, int);
128 static bool	vte_suspend(device_t, const pmf_qual_t *);
129 static bool	vte_resume(device_t, const pmf_qual_t *);
130 static void	vte_ifstart(struct ifnet *);
131 static void	vte_start_mac(struct vte_softc *);
132 static void	vte_stats_clear(struct vte_softc *);
133 static void	vte_stats_update(struct vte_softc *);
134 static void	vte_stop(struct ifnet *, int);
135 static void	vte_stop_mac(struct vte_softc *);
136 static void	vte_tick(void *);
137 static void	vte_txeof(struct vte_softc *);
138 static void	vte_ifwatchdog(struct ifnet *);
139 
140 static int vte_sysctl_intrxct(SYSCTLFN_PROTO);
141 static int vte_sysctl_inttxct(SYSCTLFN_PROTO);
142 static int vte_root_num;
143 
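/*
 * Debug printf stub: DPRINTF compiles away to nothing.  To trace the
 * driver, redefine it as e.g. "#define DPRINTF(a) printf a"; the call
 * sites use double parentheses for exactly this reason.
 */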
144 #define DPRINTF(a)
145 
146 CFATTACH_DECL3_NEW(vte, sizeof(struct vte_softc),
147     vte_match, vte_attach, vte_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
148 
149 
150 static int
151 vte_match(device_t parent, cfdata_t cf, void *aux)
152 {
153 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
154 
155 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RDC &&
156 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_RDC_R6040)
157 		return 1;
158 
159 	return 0;
160 }
161 
162 static void
163 vte_attach(device_t parent, device_t self, void *aux)
164 {
165 	struct vte_softc *sc = device_private(self);
166 	struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
167 	struct ifnet * const ifp = &sc->vte_if;
168 	int h_valid;
169 	pcireg_t reg, csr;
170 	pci_intr_handle_t intrhandle;
171 	const char *intrstr;
172 	int error;
173 	const struct sysctlnode *node;
174 	int vte_nodenum;
175 	char intrbuf[PCI_INTRSTR_LEN];
176 
177 	sc->vte_dev = self;
178 
179 	callout_init(&sc->vte_tick_ch, 0);
180 
181 	/* Map the device. */
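	/*
	 * Try the memory BAR (VTE_PCI_BMEM) first; if it is absent or
	 * cannot be mapped, fall back to the I/O BAR (VTE_PCI_BIO).
	 */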
182 	h_valid = 0;
183 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BMEM);
184 	if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_MEM) {
185 		h_valid = (pci_mapreg_map(pa, VTE_PCI_BMEM,
186 		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
187 		    0, &sc->vte_bustag, &sc->vte_bushandle, NULL, NULL) == 0);
188 	}
189 	if (h_valid == 0) {
190 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BIO);
191 		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
192 			h_valid = (pci_mapreg_map(pa, VTE_PCI_BIO,
193 			    PCI_MAPREG_TYPE_IO, 0, &sc->vte_bustag,
194 			    &sc->vte_bushandle, NULL, NULL) == 0);
195 		}
196 	}
197 	if (h_valid == 0) {
198 		aprint_error_dev(self, "unable to map device registers\n");
199 		return;
200 	}
201 	sc->vte_dmatag = pa->pa_dmat;
202 	/* Enable the device. */
203 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
204 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
205 	    csr | PCI_COMMAND_MASTER_ENABLE);
206 
207 	pci_aprint_devinfo(pa, NULL);
208 
209 	/* Reset the ethernet controller. */
210 	vte_reset(sc);
211 
212 	if ((error = vte_dma_alloc(sc)) != 0)
213 		return;
214 
215 	/* Load station address. */
216 	vte_get_macaddr(sc);
217 
218 	aprint_normal_dev(self, "Ethernet address %s\n",
219 	    ether_sprintf(sc->vte_eaddr));
220 
221 	/* Map and establish interrupts */
222 	if (pci_intr_map(pa, &intrhandle)) {
223 		aprint_error_dev(self, "couldn't map interrupt\n");
224 		return;
225 	}
226 	intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf,
227 	    sizeof(intrbuf));
228 	sc->vte_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
229 	    vte_intr, sc);
230 	if (sc->vte_ih == NULL) {
231 		aprint_error_dev(self, "couldn't establish interrupt");
232 		if (intrstr != NULL)
233 			aprint_error(" at %s", intrstr);
234 		aprint_error("\n");
235 		return;
236 	}
237 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
238 
239 	sc->vte_if.if_softc = sc;
240 	sc->vte_mii.mii_ifp = ifp;
241 	sc->vte_mii.mii_readreg = vte_miibus_readreg;
242 	sc->vte_mii.mii_writereg = vte_miibus_writereg;
243 	sc->vte_mii.mii_statchg = vte_miibus_statchg;
244 	sc->vte_ec.ec_mii = &sc->vte_mii;
245 	ifmedia_init(&sc->vte_mii.mii_media, IFM_IMASK, vte_mediachange,
246 	    ether_mediastatus);
247 	mii_attach(self, &sc->vte_mii, 0xffffffff, MII_PHY_ANY,
248 	    MII_OFFSET_ANY, 0);
249 	if (LIST_FIRST(&sc->vte_mii.mii_phys) == NULL) {
250 		ifmedia_add(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
251 		ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE);
252 	} else
253 		ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_AUTO);
254 
255 	/*
256 	 * We can support 802.1Q VLAN-sized frames.
257 	 */
258 	sc->vte_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
259 
260 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
261 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
262 	ifp->if_ioctl = vte_ifioctl;
263 	ifp->if_start = vte_ifstart;
264 	ifp->if_watchdog = vte_ifwatchdog;
265 	ifp->if_init = vte_init;
266 	ifp->if_stop = vte_stop;
267 	ifp->if_timer = 0;
268 	IFQ_SET_READY(&ifp->if_snd);
269 	if_attach(ifp);
270 	if_deferred_start_init(ifp, NULL);
271 	ether_ifattach(&sc->vte_if, sc->vte_eaddr);
272 
273 	if (pmf_device_register1(self, vte_suspend, vte_resume, vte_shutdown))
274 		pmf_class_network_register(self, ifp);
275 	else
276 		aprint_error_dev(self, "couldn't establish power handler\n");
277 
278 	rnd_attach_source(&sc->rnd_source, device_xname(self),
279 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
280 
281 	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
282 	    0, CTLTYPE_NODE, device_xname(sc->vte_dev),
283 	    SYSCTL_DESCR("vte per-controller controls"),
284 	    NULL, 0, NULL, 0, CTL_HW, vte_root_num, CTL_CREATE,
285 	    CTL_EOL) != 0) {
286 		aprint_normal_dev(sc->vte_dev, "couldn't create sysctl node\n");
287 		return;
288 	}
289 	vte_nodenum = node->sysctl_num;
290 	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
291 	    CTLFLAG_READWRITE,
292 	    CTLTYPE_INT, "int_rxct",
293 	    SYSCTL_DESCR("vte RX interrupt moderation packet counter"),
294 	    vte_sysctl_intrxct, 0, (void *)sc,
295 	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
296 	    CTL_EOL) != 0) {
297 		aprint_normal_dev(sc->vte_dev,
298 		    "couldn't create int_rxct sysctl node\n");
299 	}
300 	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
301 	    CTLFLAG_READWRITE,
302 	    CTLTYPE_INT, "int_txct",
303 	    SYSCTL_DESCR("vte TX interrupt moderation packet counter"),
304 	    vte_sysctl_inttxct, 0, (void *)sc,
305 	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
306 	    CTL_EOL) != 0) {
307 		aprint_normal_dev(sc->vte_dev,
308 		    "couldn't create int_txct sysctl node\n");
309 	}
310 }
311 
312 static int
313 vte_detach(device_t dev, int flags __unused)
314 {
315 	struct vte_softc *sc = device_private(dev);
316 	struct ifnet *ifp = &sc->vte_if;
317 	int s;
318 
319 	s = splnet();
320 	/* Stop the interface; callouts are stopped by vte_stop(). */
321 	vte_stop(ifp, 1);
322 	splx(s);
323 
324 	pmf_device_deregister(dev);
325 
326 	mii_detach(&sc->vte_mii, MII_PHY_ANY, MII_OFFSET_ANY);
327 	ifmedia_delete_instance(&sc->vte_mii.mii_media, IFM_INST_ANY);
328 
329 	ether_ifdetach(ifp);
330 	if_detach(ifp);
331 
332 	vte_dma_free(sc);
333 
334 	return (0);
335 }
336 
337 static int
338 vte_miibus_readreg(device_t dev, int phy, int reg)
339 {
340 	struct vte_softc *sc = device_private(dev);
341 	int i;
342 
343 	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
344 	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
345 	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
346 		DELAY(5);
347 		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
348 			break;
349 	}
350 
351 	if (i == 0) {
352 		aprint_error_dev(sc->vte_dev, "phy read timeout: reg %d\n", reg);
353 		return (0);
354 	}
355 
356 	return (CSR_READ_2(sc, VTE_MMRD));
357 }
358 
359 static void
360 vte_miibus_writereg(device_t dev, int phy, int reg, int val)
361 {
362 	struct vte_softc *sc = device_private(dev);
363 	int i;
364 
365 	CSR_WRITE_2(sc, VTE_MMWD, val);
366 	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
367 	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
368 	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
369 		DELAY(5);
370 		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
371 			break;
372 	}
373 
374 	if (i == 0)
375 		aprint_error_dev(sc->vte_dev, "phy write timeout: reg %d\n", reg);
376 
377 }
378 
379 static void
380 vte_miibus_statchg(struct ifnet *ifp)
381 {
382 	struct vte_softc *sc = ifp->if_softc;
383 	uint16_t val;
384 
385 	DPRINTF(("vte_miibus_statchg 0x%x 0x%x\n",
386 	    sc->vte_mii.mii_media_status, sc->vte_mii.mii_media_active));
387 
388 	sc->vte_flags &= ~VTE_FLAG_LINK;
389 	if ((sc->vte_mii.mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
390 	    (IFM_ACTIVE | IFM_AVALID)) {
391 		switch (IFM_SUBTYPE(sc->vte_mii.mii_media_active)) {
392 		case IFM_10_T:
393 		case IFM_100_TX:
394 			sc->vte_flags |= VTE_FLAG_LINK;
395 			break;
396 		default:
397 			break;
398 		}
399 	}
400 
401 	/* Stop RX/TX MACs. */
402 	vte_stop_mac(sc);
403 	/* Program MACs with resolved duplex and flow control. */
404 	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
405 		/*
406 		 * Timer waiting time : (63 + TIMER * 64) MII clock.
407 		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
408 		 */
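		/*
		 * Worked example: TIMER = 18 at 100Mbps gives
		 * (63 + 18 * 64) / 25MHz = 48.6us and TIMER = 1 at
		 * 10Mbps gives (63 + 1 * 64) / 2.5MHz = 50.8us,
		 * matching the values programmed below.
		 */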
409 		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
410 			val = 18 << VTE_IM_TIMER_SHIFT;
411 		else
412 			val = 1 << VTE_IM_TIMER_SHIFT;
413 		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
414 		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
415 		CSR_WRITE_2(sc, VTE_MRICR, val);
416 
417 		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
418 			val = 18 << VTE_IM_TIMER_SHIFT;
419 		else
420 			val = 1 << VTE_IM_TIMER_SHIFT;
421 		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
422 		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
423 		CSR_WRITE_2(sc, VTE_MTICR, val);
424 
425 		vte_mac_config(sc);
426 		vte_start_mac(sc);
427 		DPRINTF(("vte_miibus_statchg: link\n"));
428 	}
429 }
430 
431 static void
432 vte_get_macaddr(struct vte_softc *sc)
433 {
434 	uint16_t mid;
435 
436 	/*
437 	 * It seems there is no way to reload the station address;
438 	 * it is supposed to be set by the BIOS.
439 	 */
440 	mid = CSR_READ_2(sc, VTE_MID0L);
441 	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
442 	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
443 	mid = CSR_READ_2(sc, VTE_MID0M);
444 	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
445 	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
446 	mid = CSR_READ_2(sc, VTE_MID0H);
447 	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
448 	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
449 }
450 
451 
452 static int
453 vte_dma_alloc(struct vte_softc *sc)
454 {
455 	struct vte_txdesc *txd;
456 	struct vte_rxdesc *rxd;
457 	int error, i, rseg;
458 
459 	/* create DMA map for TX ring */
460 	error = bus_dmamap_create(sc->vte_dmatag, VTE_TX_RING_SZ, 1,
461 	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
462 	    &sc->vte_cdata.vte_tx_ring_map);
463 	if (error) {
464 		aprint_error_dev(sc->vte_dev,
465 		    "could not create dma map for TX ring (%d)\n",
466 		    error);
467 		goto fail;
468 	}
469 	/* Allocate and map DMA'able memory and load the DMA map for TX ring. */
470 	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_TX_RING_SZ,
471 	    VTE_TX_RING_ALIGN, 0,
472 	    sc->vte_cdata.vte_tx_ring_seg, 1, &rseg,
473 	    BUS_DMA_NOWAIT);
474 	if (error != 0) {
475 		aprint_error_dev(sc->vte_dev,
476 		    "could not allocate DMA'able memory for TX ring (%d).\n",
477 		    error);
478 		goto fail;
479 	}
480 	KASSERT(rseg == 1);
481 	error = bus_dmamem_map(sc->vte_dmatag,
482 	    sc->vte_cdata.vte_tx_ring_seg, 1,
483 	    VTE_TX_RING_SZ, (void **)(&sc->vte_cdata.vte_tx_ring),
484 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
485 	if (error != 0) {
486 		aprint_error_dev(sc->vte_dev,
487 		    "could not map DMA'able memory for TX ring (%d).\n",
488 		    error);
489 		goto fail;
490 	}
491 	memset(sc->vte_cdata.vte_tx_ring, 0, VTE_TX_RING_SZ);
492 	error = bus_dmamap_load(sc->vte_dmatag,
493 	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
494 	    VTE_TX_RING_SZ, NULL,
495 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
496 	if (error != 0) {
497 		aprint_error_dev(sc->vte_dev,
498 		    "could not load DMA'able memory for TX ring.\n");
499 		goto fail;
500 	}
501 
502 	/* create DMA map for RX ring */
503 	error = bus_dmamap_create(sc->vte_dmatag, VTE_RX_RING_SZ, 1,
504 	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
505 	    &sc->vte_cdata.vte_rx_ring_map);
506 	if (error) {
507 		aprint_error_dev(sc->vte_dev,
508 		    "could not create dma map for RX ring (%d)\n",
509 		    error);
510 		goto fail;
511 	}
512 	/* Allocate and map DMA'able memory and load the DMA map for RX ring. */
513 	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_RX_RING_SZ,
514 	    VTE_RX_RING_ALIGN, 0,
515 	    sc->vte_cdata.vte_rx_ring_seg, 1, &rseg,
516 	    BUS_DMA_NOWAIT);
517 	if (error != 0) {
518 		aprint_error_dev(sc->vte_dev,
519 		    "could not allocate DMA'able memory for RX ring (%d).\n",
520 		    error);
521 		goto fail;
522 	}
523 	KASSERT(rseg == 1);
524 	error = bus_dmamem_map(sc->vte_dmatag,
525 	    sc->vte_cdata.vte_rx_ring_seg, 1,
526 	    VTE_RX_RING_SZ, (void **)(&sc->vte_cdata.vte_rx_ring),
527 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
528 	if (error != 0) {
529 		aprint_error_dev(sc->vte_dev,
530 		    "could not map DMA'able memory for RX ring (%d).\n",
531 		    error);
532 		goto fail;
533 	}
534 	memset(sc->vte_cdata.vte_rx_ring, 0, VTE_RX_RING_SZ);
535 	error = bus_dmamap_load(sc->vte_dmatag,
536 	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
537 	    VTE_RX_RING_SZ, NULL,
538 	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
539 	if (error != 0) {
540 		aprint_error_dev(sc->vte_dev,
541 		    "could not load DMA'able memory for RX ring (%d).\n",
542 		    error);
543 		goto fail;
544 	}
545 
546 	/* Create DMA maps for TX buffers. */
547 	for (i = 0; i < VTE_TX_RING_CNT; i++) {
548 		txd = &sc->vte_cdata.vte_txdesc[i];
549 		txd->tx_m = NULL;
550 		txd->tx_dmamap = NULL;
551 		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
552 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
553 		    &txd->tx_dmamap);
554 		if (error != 0) {
555 			aprint_error_dev(sc->vte_dev,
556 			    "could not create TX DMA map %d (%d).\n", i, error);
557 			goto fail;
558 		}
559 	}
560 	/* Create DMA maps for RX buffers. */
561 	if ((error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
562 	    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
563 	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
564 		aprint_error_dev(sc->vte_dev,
565 		    "could not create spare RX dmamap (%d).\n", error);
566 		goto fail;
567 	}
568 	for (i = 0; i < VTE_RX_RING_CNT; i++) {
569 		rxd = &sc->vte_cdata.vte_rxdesc[i];
570 		rxd->rx_m = NULL;
571 		rxd->rx_dmamap = NULL;
572 		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
573 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
574 		    &rxd->rx_dmamap);
575 		if (error != 0) {
576 			aprint_error_dev(sc->vte_dev,
577 			    "could not create RX dmamap %d (%d).\n", i, error);
578 			goto fail;
579 		}
580 	}
581 	return 0;
582 
583 fail:
584 	vte_dma_free(sc);
585 	return (error);
586 }
587 
588 static void
589 vte_dma_free(struct vte_softc *sc)
590 {
591 	struct vte_txdesc *txd;
592 	struct vte_rxdesc *rxd;
593 	int i;
594 
595 	/* TX buffers. */
596 	for (i = 0; i < VTE_TX_RING_CNT; i++) {
597 		txd = &sc->vte_cdata.vte_txdesc[i];
598 		if (txd->tx_dmamap != NULL) {
599 			bus_dmamap_destroy(sc->vte_dmatag, txd->tx_dmamap);
600 			txd->tx_dmamap = NULL;
601 		}
602 	}
603 	/* RX buffers */
604 	for (i = 0; i < VTE_RX_RING_CNT; i++) {
605 		rxd = &sc->vte_cdata.vte_rxdesc[i];
606 		if (rxd->rx_dmamap != NULL) {
607 			bus_dmamap_destroy(sc->vte_dmatag, rxd->rx_dmamap);
608 			rxd->rx_dmamap = NULL;
609 		}
610 	}
611 	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
612 		bus_dmamap_destroy(sc->vte_dmatag,
613 		    sc->vte_cdata.vte_rx_sparemap);
614 		sc->vte_cdata.vte_rx_sparemap = NULL;
615 	}
616 	/* TX descriptor ring. */
617 	if (sc->vte_cdata.vte_tx_ring_map != NULL) {
618 		bus_dmamap_unload(sc->vte_dmatag,
619 		    sc->vte_cdata.vte_tx_ring_map);
620 		bus_dmamap_destroy(sc->vte_dmatag,
621 		    sc->vte_cdata.vte_tx_ring_map);
622 	}
623 	if (sc->vte_cdata.vte_tx_ring != NULL) {
624 		bus_dmamem_unmap(sc->vte_dmatag,
625 		    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ);
626 		bus_dmamem_free(sc->vte_dmatag,
627 		    sc->vte_cdata.vte_tx_ring_seg, 1);
628 	}
629 	sc->vte_cdata.vte_tx_ring = NULL;
630 	sc->vte_cdata.vte_tx_ring_map = NULL;
631 	/* RX ring. */
632 	if (sc->vte_cdata.vte_rx_ring_map != NULL) {
633 		bus_dmamap_unload(sc->vte_dmatag,
634 		    sc->vte_cdata.vte_rx_ring_map);
635 		bus_dmamap_destroy(sc->vte_dmatag,
636 		    sc->vte_cdata.vte_rx_ring_map);
637 	}
638 	if (sc->vte_cdata.vte_rx_ring != NULL) {
639 		bus_dmamem_unmap(sc->vte_dmatag,
640 		    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ);
641 		bus_dmamem_free(sc->vte_dmatag,
642 		    sc->vte_cdata.vte_rx_ring_seg, 1);
643 	}
644 	sc->vte_cdata.vte_rx_ring = NULL;
645 	sc->vte_cdata.vte_rx_ring_map = NULL;
646 }
647 
648 static bool
649 vte_shutdown(device_t dev, int howto)
650 {
651 
652 	return (vte_suspend(dev, NULL));
653 }
654 
655 static bool
656 vte_suspend(device_t dev, const pmf_qual_t *qual)
657 {
658 	struct vte_softc *sc = device_private(dev);
659 	struct ifnet *ifp = &sc->vte_if;
660 
661 	DPRINTF(("vte_suspend if_flags 0x%x\n", ifp->if_flags));
662 	if ((ifp->if_flags & IFF_RUNNING) != 0)
663 		vte_stop(ifp, 1);
664 	return true;
665 }
666 
667 static bool
668 vte_resume(device_t dev, const pmf_qual_t *qual)
669 {
670 	struct vte_softc *sc = device_private(dev);
671 	struct ifnet *ifp;
672 
673 	ifp = &sc->vte_if;
674 	if ((ifp->if_flags & IFF_UP) != 0) {
675 		ifp->if_flags &= ~IFF_RUNNING;
676 		vte_init(ifp);
677 	}
678 
679 	return true;
680 }
681 
682 static struct vte_txdesc *
683 vte_encap(struct vte_softc *sc, struct mbuf **m_head)
684 {
685 	struct vte_txdesc *txd;
686 	struct mbuf *m, *n;
687 	int copy, error, padlen;
688 
689 	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
690 	m = *m_head;
691 	/*
692 	 * Controller doesn't auto-pad, so we have to pad short
693 	 * frames out to the minimum frame length.
694 	 */
695 	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
696 		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
697 	else
698 		padlen = 0;
699 
700 	/*
701 	 * Controller does not support multi-fragmented TX buffers.
702 	 * Controller spends most of its TX processing time in
703 	 * de-fragmenting TX buffers.  Either a faster CPU or a
704 	 * more advanced controller DMA engine would be required to
705 	 * speed up the TX path.
706 	 * To mitigate the de-fragmenting issue, perform a deep copy
707 	 * from fragmented mbuf chains into a pre-allocated mbuf
708 	 * cluster, at the extra cost of kernel memory.  For frames
709 	 * composed of a single TX buffer, the deep copy is
710 	 * bypassed.
711 	 */
712 	copy = 0;
713 	if (m->m_next != NULL)
714 		copy++;
715 	if (padlen > 0 && (M_READONLY(m) ||
716 	    padlen > M_TRAILINGSPACE(m)))
717 		copy++;
718 	if (copy != 0) {
719 		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
720 		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
721 		n->m_pkthdr.len = m->m_pkthdr.len;
722 		n->m_len = m->m_pkthdr.len;
723 		m = n;
724 		txd->tx_flags |= VTE_TXMBUF;
725 	}
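	/*
	 * VTE_TXMBUF marks a descriptor that transmits from the
	 * driver-owned cluster: vte_ifstart() then frees the original
	 * chain immediately and vte_txeof() skips freeing the cluster,
	 * so it can be reused for the next deep copy.
	 */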
726 
727 	if (padlen > 0) {
728 		/* Zero out the bytes in the pad area. */
729 		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
730 		m->m_pkthdr.len += padlen;
731 		m->m_len = m->m_pkthdr.len;
732 	}
733 
734 	error = bus_dmamap_load_mbuf(sc->vte_dmatag, txd->tx_dmamap, m,
735 	    BUS_DMA_NOWAIT);
736 	if (error != 0) {
737 		txd->tx_flags &= ~VTE_TXMBUF;
738 		return (NULL);
739 	}
740 	KASSERT(txd->tx_dmamap->dm_nsegs == 1);
741 	bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
742 	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
743 
744 	txd->tx_desc->dtlen =
745 	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
746 	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
747 	sc->vte_cdata.vte_tx_cnt++;
748 	/* Update producer index. */
749 	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);
750 
751 	/* Finally hand over ownership to controller. */
752 	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
753 	txd->tx_m = m;
754 
755 	return (txd);
756 }
757 
758 static void
759 vte_ifstart(struct ifnet *ifp)
760 {
761 	struct vte_softc *sc = ifp->if_softc;
762 	struct vte_txdesc *txd;
763 	struct mbuf *m_head, *m;
764 	int enq;
765 
766 	ifp = &sc->vte_if;
767 
768 	DPRINTF(("vte_ifstart 0x%x 0x%x\n", ifp->if_flags, sc->vte_flags));
769 
770 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
771 	    IFF_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
772 		return;
773 
774 	for (enq = 0; !IFQ_IS_EMPTY(&ifp->if_snd); ) {
775 		/* Reserve one free TX descriptor. */
776 		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
777 			ifp->if_flags |= IFF_OACTIVE;
778 			break;
779 		}
780 		IFQ_POLL(&ifp->if_snd, m_head);
781 		if (m_head == NULL)
782 			break;
783 		/*
784 		 * Pack the data into the transmit ring. If we
785 		 * don't have room, set the OACTIVE flag and wait
786 		 * for the NIC to drain the ring.
787 		 */
788 		DPRINTF(("vte_encap:"));
789 		if ((txd = vte_encap(sc, &m_head)) == NULL) {
790 			DPRINTF((" failed\n"));
791 			break;
792 		}
793 		DPRINTF((" ok\n"));
794 		IFQ_DEQUEUE(&ifp->if_snd, m);
795 		KASSERT(m == m_head);
796 
797 		enq++;
798 		/*
799 		 * If there's a BPF listener, bounce a copy of this frame
800 		 * to him.
801 		 */
802 		bpf_mtap(ifp, m_head, BPF_D_OUT);
803 		/* Free consumed TX frame. */
804 		if ((txd->tx_flags & VTE_TXMBUF) != 0)
805 			m_freem(m_head);
806 	}
807 
808 	if (enq > 0) {
809 		bus_dmamap_sync(sc->vte_dmatag,
810 		    sc->vte_cdata.vte_tx_ring_map, 0,
811 		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
812 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
813 		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
814 		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
815 	}
816 }
817 
818 static void
819 vte_ifwatchdog(struct ifnet *ifp)
820 {
821 	struct vte_softc *sc = ifp->if_softc;
822 
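	/*
	 * Called once a second from vte_tick(); a vte_watchdog_timer
	 * value of 0 means the watchdog is unarmed, so only act when
	 * the counter expires here.
	 */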
823 	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
824 		return;
825 
826 	aprint_error_dev(sc->vte_dev, "watchdog timeout -- resetting\n");
827 	ifp->if_oerrors++;
828 	vte_init(ifp);
829 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
830 		vte_ifstart(ifp);
831 }
832 
833 static int
834 vte_mediachange(struct ifnet *ifp)
835 {
836 	int error;
837 	struct vte_softc *sc = ifp->if_softc;
838 
839 	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
840 		error = 0;
841 	else if (error != 0) {
842 		aprint_error_dev(sc->vte_dev, "could not set media\n");
843 		return error;
844 	}
845 	return 0;
846 
847 }
848 
849 static int
850 vte_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
851 {
852 	struct vte_softc *sc = ifp->if_softc;
853 	int error, s;
854 
855 	s = splnet();
856 	error = ether_ioctl(ifp, cmd, data);
857 	if (error == ENETRESET) {
858 		DPRINTF(("vte_ifioctl if_flags 0x%x\n", ifp->if_flags));
859 		if (ifp->if_flags & IFF_RUNNING)
860 			vte_rxfilter(sc);
861 		error = 0;
862 	}
863 	splx(s);
864 	return error;
865 }
866 
867 static void
868 vte_mac_config(struct vte_softc *sc)
869 {
870 	uint16_t mcr;
871 
872 	mcr = CSR_READ_2(sc, VTE_MCR0);
873 	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
874 	if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_FDX) != 0) {
875 		mcr |= MCR0_FULL_DUPLEX;
876 #ifdef notyet
877 		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_ETH_TXPAUSE) != 0)
878 			mcr |= MCR0_FC_ENB;
879 		/*
880 		 * The data sheet is not clear on whether the controller
881 		 * honors received pause frames.  There is no separate
882 		 * control bit for RX pause frames, so just enable the
883 		 * MCR0_FC_ENB bit.
884 		 */
885 		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_ETH_RXPAUSE) != 0)
886 			mcr |= MCR0_FC_ENB;
887 #endif
888 	}
889 	CSR_WRITE_2(sc, VTE_MCR0, mcr);
890 }
891 
892 static void
893 vte_stats_clear(struct vte_softc *sc)
894 {
895 
896 	/* Reading the counter registers clears their contents. */
897 	CSR_READ_2(sc, VTE_CNT_RX_DONE);
898 	CSR_READ_2(sc, VTE_CNT_MECNT0);
899 	CSR_READ_2(sc, VTE_CNT_MECNT1);
900 	CSR_READ_2(sc, VTE_CNT_MECNT2);
901 	CSR_READ_2(sc, VTE_CNT_MECNT3);
902 	CSR_READ_2(sc, VTE_CNT_TX_DONE);
903 	CSR_READ_2(sc, VTE_CNT_MECNT4);
904 	CSR_READ_2(sc, VTE_CNT_PAUSE);
905 }
906 
907 static void
908 vte_stats_update(struct vte_softc *sc)
909 {
910 	struct vte_hw_stats *stat;
911 	struct ifnet *ifp = &sc->vte_if;
912 	uint16_t value;
913 
914 	stat = &sc->vte_stats;
915 
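	/*
	 * Read the MAC event counter interrupt status; this also
	 * acknowledges any pending counter overflow interrupt.
	 */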
916 	CSR_READ_2(sc, VTE_MECISR);
917 	/* RX stats. */
918 	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
919 	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
920 	stat->rx_bcast_frames += (value >> 8);
921 	stat->rx_mcast_frames += (value & 0xFF);
922 	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
923 	stat->rx_runts += (value >> 8);
924 	stat->rx_crcerrs += (value & 0xFF);
925 	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
926 	stat->rx_long_frames += (value & 0xFF);
927 	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
928 	stat->rx_fifo_full += (value >> 8);
929 	stat->rx_desc_unavail += (value & 0xFF);
930 
931 	/* TX stats. */
932 	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
933 	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
934 	stat->tx_underruns += (value >> 8);
935 	stat->tx_late_colls += (value & 0xFF);
936 
937 	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
938 	stat->tx_pause_frames += (value >> 8);
939 	stat->rx_pause_frames += (value & 0xFF);
940 
941 	/* Update ifp counters. */
942 	ifp->if_opackets = stat->tx_frames;
943 	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
944 	ifp->if_ipackets = stat->rx_frames;
945 	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
946 	    stat->rx_long_frames + stat->rx_fifo_full;
947 }
948 
949 static int
950 vte_intr(void *arg)
951 {
952 	struct vte_softc *sc = (struct vte_softc *)arg;
953 	struct ifnet *ifp = &sc->vte_if;
954 	uint16_t status;
955 	int n;
956 
957 	/* Reading VTE_MISR acknowledges interrupts. */
958 	status = CSR_READ_2(sc, VTE_MISR);
959 	DPRINTF(("vte_intr status 0x%x\n", status));
960 	if ((status & VTE_INTRS) == 0) {
961 		/* Not ours. */
962 		return 0;
963 	}
964 
965 	/* Disable interrupts. */
966 	CSR_WRITE_2(sc, VTE_MIER, 0);
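	/*
	 * Process events with interrupts disabled, re-reading the
	 * status at most 8 times so that a continuous stream of events
	 * cannot keep us in the handler forever.
	 */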
967 	for (n = 8; (status & VTE_INTRS) != 0;) {
968 		if ((ifp->if_flags & IFF_RUNNING) == 0)
969 			break;
970 		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
971 		    MISR_RX_FIFO_FULL)) != 0)
972 			vte_rxeof(sc);
973 		if ((status & MISR_TX_DONE) != 0)
974 			vte_txeof(sc);
975 		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
976 			vte_stats_update(sc);
977 		if_schedule_deferred_start(ifp);
978 		if (--n > 0)
979 			status = CSR_READ_2(sc, VTE_MISR);
980 		else
981 			break;
982 	}
983 
984 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
985 		/* Re-enable interrupts. */
986 		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
987 	}
988 	return 1;
989 }
990 
991 static void
992 vte_txeof(struct vte_softc *sc)
993 {
994 	struct ifnet *ifp;
995 	struct vte_txdesc *txd;
996 	uint16_t status;
997 	int cons, prog;
998 
999 	ifp = &sc->vte_if;
1000 
1001 	if (sc->vte_cdata.vte_tx_cnt == 0)
1002 		return;
1003 	bus_dmamap_sync(sc->vte_dmatag,
1004 	    sc->vte_cdata.vte_tx_ring_map, 0,
1005 	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
1006 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1007 	cons = sc->vte_cdata.vte_tx_cons;
1008 	/*
1009 	 * Go through our TX list and free mbufs for those
1010 	 * frames which have been transmitted.
1011 	 */
1012 	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
1013 		txd = &sc->vte_cdata.vte_txdesc[cons];
1014 		status = le16toh(txd->tx_desc->dtst);
1015 		if ((status & VTE_DTST_TX_OWN) != 0)
1016 			break;
1017 		if ((status & VTE_DTST_TX_OK) != 0)
1018 			ifp->if_collisions += (status & 0xf);
1019 		sc->vte_cdata.vte_tx_cnt--;
1020 		/* Reclaim transmitted mbufs. */
1021 		bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
1022 		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1023 		bus_dmamap_unload(sc->vte_dmatag, txd->tx_dmamap);
1024 		if ((txd->tx_flags & VTE_TXMBUF) == 0)
1025 			m_freem(txd->tx_m);
1026 		txd->tx_flags &= ~VTE_TXMBUF;
1027 		txd->tx_m = NULL;
1029 		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
1030 	}
1031 
1032 	if (prog > 0) {
1033 		ifp->if_flags &= ~IFF_OACTIVE;
1034 		sc->vte_cdata.vte_tx_cons = cons;
1035 		/*
1036 		 * Unarm the watchdog timer only when there are no
1037 		 * pending frames in the TX queue.
1038 		 */
1039 		if (sc->vte_cdata.vte_tx_cnt == 0)
1040 			sc->vte_watchdog_timer = 0;
1041 	}
1042 }
1043 
1044 static int
1045 vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
1046 {
1047 	struct mbuf *m;
1048 	bus_dmamap_t map;
1049 
1050 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1051 	if (m == NULL)
1052 		return (ENOBUFS);
1053 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1054 	m_adj(m, sizeof(uint32_t));
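	/*
	 * Reserve sizeof(uint32_t) at the front of the cluster: the
	 * controller is handed MCLBYTES - 4 bytes, the same length
	 * vte_rxeof() restores when it recycles a descriptor.
	 */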
1055 
1056 	if (bus_dmamap_load_mbuf(sc->vte_dmatag,
1057 	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT) != 0) {
1058 		m_freem(m);
1059 		return (ENOBUFS);
1060 	}
1061 	KASSERT(sc->vte_cdata.vte_rx_sparemap->dm_nsegs == 1);
1062 
1063 	if (rxd->rx_m != NULL) {
1064 		bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
1065 		    0, rxd->rx_dmamap->dm_mapsize,
1066 		    BUS_DMASYNC_POSTREAD);
1067 		bus_dmamap_unload(sc->vte_dmatag, rxd->rx_dmamap);
1068 	}
1069 	map = rxd->rx_dmamap;
1070 	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
1071 	sc->vte_cdata.vte_rx_sparemap = map;
1072 	bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
1073 	    0, rxd->rx_dmamap->dm_mapsize,
1074 	    BUS_DMASYNC_PREREAD);
1075 	rxd->rx_m = m;
1076 	rxd->rx_desc->drbp =
1077 	    htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
1078 	rxd->rx_desc->drlen = htole16(
1079 	    VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
1080 	DPRINTF(("rx data %p mbuf %p buf 0x%x/0x%x\n", rxd, m,
1081 		(u_int)rxd->rx_dmamap->dm_segs[0].ds_addr,
1082 		rxd->rx_dmamap->dm_segs[0].ds_len));
1083 	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1084 
1085 	return (0);
1086 }
1087 
1088 static void
1089 vte_rxeof(struct vte_softc *sc)
1090 {
1091 	struct ifnet *ifp;
1092 	struct vte_rxdesc *rxd;
1093 	struct mbuf *m;
1094 	uint16_t status, total_len;
1095 	int cons, prog;
1096 
1097 	bus_dmamap_sync(sc->vte_dmatag,
1098 	    sc->vte_cdata.vte_rx_ring_map, 0,
1099 	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
1100 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1101 	cons = sc->vte_cdata.vte_rx_cons;
1102 	ifp = &sc->vte_if;
1103 	DPRINTF(("vte_rxeof if_flags 0x%x\n", ifp->if_flags));
1104 	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
1105 	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
1106 		rxd = &sc->vte_cdata.vte_rxdesc[cons];
1107 		status = le16toh(rxd->rx_desc->drst);
1108 		DPRINTF(("vte_rxeof rxd %d/%p mbuf %p status 0x%x len %d\n",
1109 			cons, rxd, rxd->rx_m, status,
1110 			VTE_RX_LEN(le16toh(rxd->rx_desc->drlen))));
1111 		if ((status & VTE_DRST_RX_OWN) != 0)
1112 			break;
1113 		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
1114 		m = rxd->rx_m;
1115 		if ((status & VTE_DRST_RX_OK) == 0) {
1116 			/* Discard errored frame. */
1117 			rxd->rx_desc->drlen =
1118 			    htole16(MCLBYTES - sizeof(uint32_t));
1119 			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1120 			continue;
1121 		}
1122 		if (vte_newbuf(sc, rxd) != 0) {
1123 			DPRINTF(("vte_rxeof newbuf failed\n"));
1124 			ifp->if_ierrors++;
1125 			rxd->rx_desc->drlen =
1126 			    htole16(MCLBYTES - sizeof(uint32_t));
1127 			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1128 			continue;
1129 		}
1130 
1131 		/*
1132 		 * It seems there is no way to strip FCS bytes.
1133 		 */
1134 		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
1135 		m_set_rcvif(m, ifp);
1136 		if_percpuq_enqueue(ifp->if_percpuq, m);
1137 	}
1138 
1139 	if (prog > 0) {
1140 		/* Update the consumer index. */
1141 		sc->vte_cdata.vte_rx_cons = cons;
1142 		/*
1143 		 * Sync updated RX descriptors such that the controller sees
1144 		 * modified RX buffer addresses.
1145 		 */
1146 		bus_dmamap_sync(sc->vte_dmatag,
1147 		    sc->vte_cdata.vte_rx_ring_map, 0,
1148 		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
1149 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1150 #ifdef notyet
1151 		/*
1152 		 * Update the residue counter.  The controller does not
1153 		 * keep track of the number of available RX descriptors,
1154 		 * so the driver has to update VTE_MRDCR to let the
1155 		 * controller know how many free RX descriptors were
1156 		 * added.  This is a mechanism similar to the one used
1157 		 * in VIA velocity controllers, and it indicates that
1158 		 * the controller just polls the OWN bit of the current
1159 		 * RX descriptor pointer.
1160 		 * A couple of severe issues were seen on a sample
1161 		 * board where the controller continuously emitted TX
1162 		 * pause frames once the RX pause threshold was crossed.
1163 		 * Once triggered it never recovered from that state and
1164 		 * I could not find a way to bring it back to a working
1165 		 * state.  This issue effectively disconnected the
1166 		 * system from the network.  Also, the controller used
1167 		 * 00:00:00:00:00:00 as the source station address of TX
1168 		 * pause frames, which is probably one of the reasons
1169 		 * why the vendor recommends not to enable flow control
1170 		 * on the R6040 controller.
1171 		 */
1172 		CSR_WRITE_2(sc, VTE_MRDCR, prog |
1173 		    (((VTE_RX_RING_CNT * 2) / 10) <<
1174 		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
1175 #endif
1176 	rnd_add_uint32(&sc->rnd_source, prog);
1177 	}
1178 }
1179 
1180 static void
1181 vte_tick(void *arg)
1182 {
1183 	struct vte_softc *sc;
1184 	int s = splnet();
1185 
1186 	sc = (struct vte_softc *)arg;
1187 
1188 	mii_tick(&sc->vte_mii);
1189 	vte_stats_update(sc);
1190 	vte_txeof(sc);
1191 	vte_ifwatchdog(&sc->vte_if);
1192 	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
1193 	splx(s);
1194 }
1195 
1196 static void
1197 vte_reset(struct vte_softc *sc)
1198 {
1199 	uint16_t mcr;
1200 	int i;
1201 
1202 	mcr = CSR_READ_2(sc, VTE_MCR1);
1203 	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
1204 	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
1205 		DELAY(10);
1206 		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
1207 			break;
1208 	}
1209 	if (i == 0)
1210 		aprint_error_dev(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
1211 	/*
1212 	 * Follow the vendor-recommended way of resetting the MAC.
1213 	 * The vendor confirms that MCR1_MAC_RESET in VTE_MCR1 is not
1214 	 * reliable, so manually reset the internal state machine.
1215 	 */
1216 	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
1217 	CSR_WRITE_2(sc, VTE_MACSM, 0);
1218 	DELAY(5000);
1219 }
1220 
1221 
1222 static int
1223 vte_init(struct ifnet *ifp)
1224 {
1225 	struct vte_softc *sc = ifp->if_softc;
1226 	bus_addr_t paddr;
1227 	uint8_t eaddr[ETHER_ADDR_LEN];
1228 	int s, error;
1229 
1230 	s = splnet();
1231 	/*
1232 	 * Cancel any pending I/O.
1233 	 */
1234 	vte_stop(ifp, 1);
1235 	/*
1236 	 * Reset the chip to a known state.
1237 	 */
1238 	vte_reset(sc);
1239 
1240 	if ((sc->vte_if.if_flags & IFF_UP) == 0) {
1241 		splx(s);
1242 		return 0;
1243 	}
1244 
1245 	/* Initialize RX descriptors. */
1246 	if (vte_init_rx_ring(sc) != 0) {
1247 		aprint_error_dev(sc->vte_dev, "no memory for RX buffers.\n");
1248 		vte_stop(ifp, 1);
1249 		splx(s);
1250 		return ENOMEM;
1251 	}
1252 	if (vte_init_tx_ring(sc) != 0) {
1253 		aprint_error_dev(sc->vte_dev, "no memory for TX buffers.\n");
1254 		vte_stop(ifp, 1);
1255 		splx(s);
1256 		return ENOMEM;
1257 	}
1258 
1259 	/*
1260 	 * Reprogram the station address.  The controller supports up
1261 	 * to 4 different station addresses, so the driver programs
1262 	 * the first station address as its own ethernet address and
1263 	 * configures the remaining three addresses as perfect
1264 	 * multicast addresses.
1265 	 */
1266 	memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1267 	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
1268 	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
1269 	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);
1270 
1271 	/* Set TX descriptor base addresses. */
1272 	paddr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;
1273 	DPRINTF(("tx paddr 0x%x\n", (u_int)paddr));
1274 	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
1275 	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
1276 
1277 	/* Set RX descriptor base addresses. */
1278 	paddr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;
1279 	DPRINTF(("rx paddr 0x%x\n", (u_int)paddr));
1280 	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
1281 	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
1282 	/*
1283 	 * Initialize RX descriptor residue counter and set RX
1284 	 * pause threshold to 20% of available RX descriptors.
1285 	 * See comments on vte_rxeof() for details on flow control
1286 	 * issues.
1287 	 */
1288 	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
1289 	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
1290 
1291 	/*
1292 	 * Always use the maximum frame size the controller can
1293 	 * support, otherwise received frames longer than the vte(4)
1294 	 * MTU would be silently dropped by the controller.  That
1295 	 * would break path-MTU discovery, as the sender would never
1296 	 * get any response from the receiver.  The RX buffer size
1297 	 * should be a multiple of 4.
1298 	 * Note that jumbo frames are silently ignored by the
1299 	 * controller and even the MAC counters do not detect them.
1300 	 */
1301 	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);
1302 
1303 	/* Configure FIFO. */
1304 	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
1305 	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
1306 	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);
1307 
1308 	/*
1309 	 * Configure TX/RX MACs.  Actual resolved duplex and flow
1310 	 * control configuration is done after detecting a valid
1311 	 * link.  Note, we don't generate early interrupt here
1312 	 * link.  Note that we don't generate early interrupts here
1313 	 * either, since FreeBSD does not have the interrupt latency
1314 	 * problems Windows has.
1315 	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
1316 	/*
1317 	 * We manually keep track of PHY status changes to
1318 	 * configure resolved duplex and flow control since only
1319 	 * duplex configuration can be automatically reflected to
1320 	 * MCR0.
1321 	 */
1322 	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
1323 	    MCR1_EXCESS_COL_RETRY_16);
1324 
1325 	/* Initialize RX filter. */
1326 	vte_rxfilter(sc);
1327 
1328 	/* Disable TX/RX interrupt moderation control. */
1329 	CSR_WRITE_2(sc, VTE_MRICR, 0);
1330 	CSR_WRITE_2(sc, VTE_MTICR, 0);
1331 
1332 	/* Enable MAC event counter interrupts. */
1333 	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
1334 	/* Clear MAC statistics. */
1335 	vte_stats_clear(sc);
1336 
1337 	/* Acknowledge all pending interrupts and clear it. */
1338 	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
1339 	CSR_WRITE_2(sc, VTE_MISR, 0);
1340 	DPRINTF(("before ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
1341 		CSR_READ_2(sc, VTE_MISR)));
1342 
1343 	sc->vte_flags &= ~VTE_FLAG_LINK;
1344 	ifp->if_flags |= IFF_RUNNING;
1345 	ifp->if_flags &= ~IFF_OACTIVE;
1346 
1347 	/* calling mii_mediachg will call back vte_start_mac() */
1348 	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
1349 		error = 0;
1350 	else if (error != 0) {
1351 		aprint_error_dev(sc->vte_dev, "could not set media\n");
1352 		splx(s);
1353 		return error;
1354 	}
1355 
1356 	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
1357 
1358 	DPRINTF(("ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
1359 		CSR_READ_2(sc, VTE_MISR)));
1360 	splx(s);
1361 	return 0;
1362 }
1363 
1364 static void
1365 vte_stop(struct ifnet *ifp, int disable)
1366 {
1367 	struct vte_softc *sc = ifp->if_softc;
1368 	struct vte_txdesc *txd;
1369 	struct vte_rxdesc *rxd;
1370 	int i;
1371 
1372 	DPRINTF(("vte_stop if_flags 0x%x\n", ifp->if_flags));
1373 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1374 		return;
1375 	/*
1376 	 * Mark the interface down and cancel the watchdog timer.
1377 	 */
1378 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1379 	sc->vte_flags &= ~VTE_FLAG_LINK;
1380 	callout_stop(&sc->vte_tick_ch);
1381 	sc->vte_watchdog_timer = 0;
1382 	vte_stats_update(sc);
1383 	/* Disable interrupts. */
1384 	CSR_WRITE_2(sc, VTE_MIER, 0);
1385 	CSR_WRITE_2(sc, VTE_MECIER, 0);
1386 	/* Stop RX/TX MACs. */
1387 	vte_stop_mac(sc);
1388 	/* Clear interrupts. */
1389 	CSR_READ_2(sc, VTE_MISR);
1390 	/*
1391 	 * Free TX/RX mbufs still in the queues.
1392 	 */
1393 	for (i = 0; i < VTE_RX_RING_CNT; i++) {
1394 		rxd = &sc->vte_cdata.vte_rxdesc[i];
1395 		if (rxd->rx_m != NULL) {
1396 			bus_dmamap_sync(sc->vte_dmatag,
1397 			    rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
1398 			    BUS_DMASYNC_POSTREAD);
1399 			bus_dmamap_unload(sc->vte_dmatag,
1400 			    rxd->rx_dmamap);
1401 			m_freem(rxd->rx_m);
1402 			rxd->rx_m = NULL;
1403 		}
1404 	}
1405 	for (i = 0; i < VTE_TX_RING_CNT; i++) {
1406 		txd = &sc->vte_cdata.vte_txdesc[i];
1407 		if (txd->tx_m != NULL) {
1408 			bus_dmamap_sync(sc->vte_dmatag,
1409 			    txd->tx_dmamap, 0, txd->tx_dmamap->dm_mapsize,
1410 			    BUS_DMASYNC_POSTWRITE);
1411 			bus_dmamap_unload(sc->vte_dmatag,
1412 			    txd->tx_dmamap);
1413 			if ((txd->tx_flags & VTE_TXMBUF) == 0)
1414 				m_freem(txd->tx_m);
1415 			txd->tx_m = NULL;
1416 			txd->tx_flags &= ~VTE_TXMBUF;
1417 		}
1418 	}
1419 	/* Free TX mbuf pools used for deep copy. */
1420 	for (i = 0; i < VTE_TX_RING_CNT; i++) {
1421 		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
1422 			m_freem(sc->vte_cdata.vte_txmbufs[i]);
1423 			sc->vte_cdata.vte_txmbufs[i] = NULL;
1424 		}
1425 	}
1426 }
1427 
1428 static void
1429 vte_start_mac(struct vte_softc *sc)
1430 {
1431 	struct ifnet *ifp = &sc->vte_if;
1432 	uint16_t mcr;
1433 	int i;
1434 
1435 	/* Enable RX/TX MACs. */
1436 	mcr = CSR_READ_2(sc, VTE_MCR0);
1437 	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
1438 	    (MCR0_RX_ENB | MCR0_TX_ENB) &&
1439 	    (ifp->if_flags & IFF_RUNNING) != 0) {
1440 		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
1441 		CSR_WRITE_2(sc, VTE_MCR0, mcr);
1442 		for (i = VTE_TIMEOUT; i > 0; i--) {
1443 			mcr = CSR_READ_2(sc, VTE_MCR0);
1444 			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
1445 			    (MCR0_RX_ENB | MCR0_TX_ENB))
1446 				break;
1447 			DELAY(10);
1448 		}
1449 		if (i == 0)
1450 			aprint_error_dev(sc->vte_dev,
1451 			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
1452 	}
1453 	vte_rxfilter(sc);
1454 }
1455 
1456 static void
1457 vte_stop_mac(struct vte_softc *sc)
1458 {
1459 	uint16_t mcr;
1460 	int i;
1461 
1462 	/* Disable RX/TX MACs. */
1463 	mcr = CSR_READ_2(sc, VTE_MCR0);
1464 	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
1465 		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
1466 		CSR_WRITE_2(sc, VTE_MCR0, mcr);
1467 		for (i = VTE_TIMEOUT; i > 0; i--) {
1468 			mcr = CSR_READ_2(sc, VTE_MCR0);
1469 			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
1470 				break;
1471 			DELAY(10);
1472 		}
1473 		if (i == 0)
1474 			aprint_error_dev(sc->vte_dev,
1475 			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
1476 	}
1477 }
1478 
1479 static int
1480 vte_init_tx_ring(struct vte_softc *sc)
1481 {
1482 	struct vte_tx_desc *desc;
1483 	struct vte_txdesc *txd;
1484 	bus_addr_t addr;
1485 	int i;
1486 
1487 	sc->vte_cdata.vte_tx_prod = 0;
1488 	sc->vte_cdata.vte_tx_cons = 0;
1489 	sc->vte_cdata.vte_tx_cnt = 0;
1490 
1491 	/* Pre-allocate TX mbufs for deep copy. */
1492 	for (i = 0; i < VTE_TX_RING_CNT; i++) {
1493 		sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
1494 		    MT_DATA, M_PKTHDR);
1495 		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
1496 			return (ENOBUFS);
1497 		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
1498 		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
1499 	}
1500 	desc = sc->vte_cdata.vte_tx_ring;
1501 	bzero(desc, VTE_TX_RING_SZ);
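	/*
	 * Chain each descriptor's next pointer (dtnp) to the following
	 * descriptor and wrap the last entry back to the head, so the
	 * controller walks the ring endlessly.
	 */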
1502 	for (i = 0; i < VTE_TX_RING_CNT; i++) {
1503 		txd = &sc->vte_cdata.vte_txdesc[i];
1504 		txd->tx_m = NULL;
1505 		if (i != VTE_TX_RING_CNT - 1)
1506 			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
1507 			    sizeof(struct vte_tx_desc) * (i + 1);
1508 		else
1509 			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
1510 			    sizeof(struct vte_tx_desc) * 0;
1511 		desc = &sc->vte_cdata.vte_tx_ring[i];
1512 		desc->dtnp = htole32(addr);
1513 		DPRINTF(("tx ring desc %d addr 0x%x\n", i, (u_int)addr));
1514 		txd->tx_desc = desc;
1515 	}
1516 
1517 	bus_dmamap_sync(sc->vte_dmatag,
1518 	    sc->vte_cdata.vte_tx_ring_map, 0,
1519 	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
1520 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1521 	return (0);
1522 }
1523 
1524 static int
1525 vte_init_rx_ring(struct vte_softc *sc)
1526 {
1527 	struct vte_rx_desc *desc;
1528 	struct vte_rxdesc *rxd;
1529 	bus_addr_t addr;
1530 	int i;
1531 
1532 	sc->vte_cdata.vte_rx_cons = 0;
1533 	desc = sc->vte_cdata.vte_rx_ring;
1534 	bzero(desc, VTE_RX_RING_SZ);
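	/*
	 * As on the TX side, link each descriptor's next pointer
	 * (drnp) into a ring and attach a fresh RX mbuf to every slot.
	 */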
1535 	for (i = 0; i < VTE_RX_RING_CNT; i++) {
1536 		rxd = &sc->vte_cdata.vte_rxdesc[i];
1537 		rxd->rx_m = NULL;
1538 		if (i != VTE_RX_RING_CNT - 1)
1539 			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
1540 			    + sizeof(struct vte_rx_desc) * (i + 1);
1541 		else
1542 			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
1543 			    + sizeof(struct vte_rx_desc) * 0;
1544 		desc = &sc->vte_cdata.vte_rx_ring[i];
1545 		desc->drnp = htole32(addr);
1546 		DPRINTF(("rx ring desc %d addr 0x%x\n", i, (u_int)addr));
1547 		rxd->rx_desc = desc;
1548 		if (vte_newbuf(sc, rxd) != 0)
1549 			return (ENOBUFS);
1550 	}
1551 
1552 	bus_dmamap_sync(sc->vte_dmatag,
1553 	    sc->vte_cdata.vte_rx_ring_map, 0,
1554 	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
1555 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1556 
1557 	return (0);
1558 }
1559 
1560 static void
1561 vte_rxfilter(struct vte_softc *sc)
1562 {
1563 	struct ether_multistep step;
1564 	struct ether_multi *enm;
1565 	struct ifnet *ifp;
1566 	uint8_t *eaddr;
1567 	uint32_t crc;
1568 	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
1569 	uint16_t mchash[4], mcr;
1570 	int i, nperf;
1571 
1572 	ifp = &sc->vte_if;
1573 
1574 	DPRINTF(("vte_rxfilter\n"));
1575 	memset(mchash, 0, sizeof(mchash));
1576 	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
1577 		rxfilt_perf[i][0] = 0xFFFF;
1578 		rxfilt_perf[i][1] = 0xFFFF;
1579 		rxfilt_perf[i][2] = 0xFFFF;
1580 	}
1581 
1582 	mcr = CSR_READ_2(sc, VTE_MCR0);
1583 	DPRINTF(("vte_rxfilter mcr 0x%x\n", mcr));
1584 	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
1585 	if ((ifp->if_flags & IFF_BROADCAST) == 0)
1586 		mcr |= MCR0_BROADCAST_DIS;
1587 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1588 		if ((ifp->if_flags & IFF_PROMISC) != 0)
1589 			mcr |= MCR0_PROMISC;
1590 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
1591 			mcr |= MCR0_MULTICAST;
1592 		mchash[0] = 0xFFFF;
1593 		mchash[1] = 0xFFFF;
1594 		mchash[2] = 0xFFFF;
1595 		mchash[3] = 0xFFFF;
1596 		goto chipit;
1597 	}
1598 
1599 	ETHER_FIRST_MULTI(step, &sc->vte_ec, enm);
1600 	nperf = 0;
1601 	while (enm != NULL) {
1602 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1603 			sc->vte_if.if_flags |= IFF_ALLMULTI;
1604 			mcr |= MCR0_MULTICAST;
1605 			mchash[0] = 0xFFFF;
1606 			mchash[1] = 0xFFFF;
1607 			mchash[2] = 0xFFFF;
1608 			mchash[3] = 0xFFFF;
1609 			goto chipit;
1610 		}
1611 		/*
1612 		 * Program the first 3 multicast groups into
1613 		 * the perfect filter.  For all others, use the
1614 		 * hash table.
1615 		 */
1616 		if (nperf < VTE_RXFILT_PERFECT_CNT) {
1617 			eaddr = enm->enm_addrlo;
1618 			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
1619 			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
1620 			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
1621 			nperf++;
1622 		} else {
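			/*
			 * The top two bits of the big-endian CRC select
			 * one of the four 16-bit MAR registers; the
			 * next four bits select the bit within it.
			 */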
1623 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1624 			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
1625 		}
1626 		ETHER_NEXT_MULTI(step, enm);
1627 	}
1628 	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
1629 	    mchash[3] != 0)
1630 		mcr |= MCR0_MULTICAST;
1631 
1632 chipit:
1633 	/* Program multicast hash table. */
1634 	DPRINTF(("chipit write multicast\n"));
1635 	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
1636 	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
1637 	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
1638 	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
1639 	/* Program perfect filter table. */
1640 	DPRINTF(("chipit write perfect filter\n"));
1641 	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
1642 		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
1643 		    rxfilt_perf[i][0]);
1644 		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
1645 		    rxfilt_perf[i][1]);
1646 		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
1647 		    rxfilt_perf[i][2]);
1648 	}
1649 	DPRINTF(("chipit mcr0 0x%x\n", mcr));
1650 	CSR_WRITE_2(sc, VTE_MCR0, mcr);
1651 	DPRINTF(("chipit read mcr0\n"));
1652 	CSR_READ_2(sc, VTE_MCR0);
1653 	DPRINTF(("chipit done\n"));
1654 }
1655 
1656 /*
1657  * Set up the sysctl(3) MIB, hw.vte.*.  Individual controllers are
1658  * set up in vte_attach().
1659  */
1660 SYSCTL_SETUP(sysctl_vte, "sysctl vte subtree setup")
1661 {
1662 	int rc;
1663 	const struct sysctlnode *node;
1664 
1665 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
1666 	    0, CTLTYPE_NODE, "vte",
1667 	    SYSCTL_DESCR("vte interface controls"),
1668 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
1669 		goto err;
1670 	}
1671 
1672 	vte_root_num = node->sysctl_num;
1673 	return;
1674 
1675 err:
1676 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
1677 }
1678 
1679 static int
1680 vte_sysctl_intrxct(SYSCTLFN_ARGS)
1681 {
1682 	int error, t;
1683 	struct sysctlnode node;
1684 	struct vte_softc *sc;
1685 
1686 	node = *rnode;
1687 	sc = node.sysctl_data;
1688 	t = sc->vte_int_rx_mod;
1689 	node.sysctl_data = &t;
1690 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1691 	if (error || newp == NULL)
1692 		return error;
1693 	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
1694 		return EINVAL;
1695 
1696 	sc->vte_int_rx_mod = t;
1697 	vte_miibus_statchg(&sc->vte_if);
1698 	return 0;
1699 }
1700 
1701 static int
1702 vte_sysctl_inttxct(SYSCTLFN_ARGS)
1703 {
1704 	int error, t;
1705 	struct sysctlnode node;
1706 	struct vte_softc *sc;
1707 
1708 	node = *rnode;
1709 	sc = node.sysctl_data;
1710 	t = sc->vte_int_tx_mod;
1711 	node.sysctl_data = &t;
1712 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1713 	if (error || newp == NULL)
1714 		return error;
1715 
1716 	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
1717 		return EINVAL;
1718 	sc->vte_int_tx_mod = t;
1719 	vte_miibus_statchg(&sc->vte_if);
1720 	return 0;
1721 }
1722