/*	$OpenBSD: if_vte.c,v 1.5 2011/05/28 08:31:51 kevlo Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/rndvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clocks.
		 * MII clock : 25 MHz (100 Mbps) or 2.5 MHz (10 Mbps).
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

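	/*
	 * If more than one PHY instance is attached, reset each
	 * PHY before programming the new media setting.
	 */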
	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

260 vte_match(struct device *dev, void *match, void *aux)
261 {
262 	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
263 	    sizeof(vte_devices) / sizeof(vte_devices[0]));
264 }
265 
void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address;
	 * it is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ.
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	ifp->if_baudrate = IF_Mbps(100);
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more
	 * reliable than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create DMA map for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA map for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX
	 * buffers, so the driver spends most of its TX processing
	 * time de-fragmenting them.  Either a faster CPU or a more
	 * advanced controller DMA engine would be required to speed
	 * up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * that are composed of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			/* Drop the dequeued frame instead of leaking it. */
			m_freem(m_head);
			ifp->if_oerrors++;
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{
	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

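	/*
	 * Read and discard the event counter interrupt status;
	 * this presumably acknowledges a pending counter overflow
	 * interrupt before the counters are harvested below.
	 */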
	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
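	/*
	 * Process at most 8 rounds of status so a babbling chip
	 * cannot keep us in the interrupt handler forever.
	 */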
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
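	/*
	 * Trim sizeof(uint32_t) bytes so the usable buffer length
	 * matches the MCLBYTES - sizeof(uint32_t) value programmed
	 * into the descriptors and stays a multiple of 4, as the
	 * controller requires (see vte_init()).
	 */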
	m_adj(m, sizeof(uint32_t));

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to let the
		 * controller know how many free RX descriptors were
		 * added.  This is a mechanism similar to the one used
		 * in VIA velocity controllers, and it indicates the
		 * controller just polls the OWN bit of the current RX
		 * descriptor pointer.
		 * A couple of severe issues were seen on a sample
		 * board where the controller continuously emits TX
		 * pause frames once the RX pause threshold is crossed.
		 * Once triggered it never recovered from that state;
		 * I couldn't find a way to make it work again, at
		 * least.  This issue effectively disconnected the
		 * system from the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of
		 * TX pause frames.  This is probably one of the
		 * reasons why the vendor recommends not to enable
		 * flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout (0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor-recommended way to reset the MAC.
	 * The vendor confirms that relying on MCR1_MAC_RESET in
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the
	 * RX pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that have a longer
	 * frame length than the vte(4) MTU would be silently dropped
	 * by the controller.  This would break path-MTU discovery as
	 * the sender wouldn't get any responses from the receiver.
	 * The RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note, we don't generate an early interrupt here
	 * either since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC (0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC (0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
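	/*
	 * Chain the TX descriptors together through their "next
	 * descriptor" pointers, linking the last one back to the
	 * first to form a ring.
	 */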
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
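	/*
	 * Chain the RX descriptors into a ring, the same way as the
	 * TX descriptors, and attach a fresh mbuf cluster to each one.
	 */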
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
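			/*
			 * The top two bits of the big-endian CRC select
			 * one of the four 16-bit hash registers; the
			 * next four bits select the bit within it.
			 */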
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}