xref: /openbsd-src/sys/dev/pci/if_vte.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
/*	$OpenBSD: if_vte.c,v 1.8 2013/11/21 16:16:08 mpi Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/rndvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
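	/*
	 * The MDIO engine clears the MMDIO_READ bit once the shift
	 * cycle completes, so poll it for up to VTE_PHY_TIMEOUT * 5us
	 * before giving up on the PHY.
	 */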
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
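		/*
		 * With TIMER = 18 at 100Mbps that works out to
		 * (63 + 18 * 64) / 25MHz = 1215 cycles ~= 48.6us;
		 * with TIMER = 1 at 10Mbps, (63 + 64) / 2.5MHz =
		 * 50.8us, matching the figures noted below.
		 */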
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
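	/*
	 * The address is read back as three little-endian 16-bit
	 * words: the low byte of each MID register holds the earlier
	 * octet of its pair.
	 */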
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
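	/*
	 * Cap the send queue one below the ring size; vte_start()
	 * always keeps one TX descriptor in reserve.
	 */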
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create DMA stuffs for TX ring */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		/* Free with the segment we allocated, not the KVA pointer. */
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA stuffs for RX ring */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * Controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * Controller does not support multi-fragmented TX buffers,
	 * so the driver spends most of its TX processing time
	 * de-fragmenting them.  Either a faster CPU or a more
	 * advanced controller DMA engine would be required to speed
	 * up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains into a pre-allocated mbuf
	 * cluster, at the extra cost of kernel memory.  For frames
	 * composed of a single TX buffer, the deep copy is bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

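	/*
	 * The map was created with a single segment (see
	 * vte_dma_alloc()), so segment 0 describes the whole frame.
	 */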
	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
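		/* Poke the poll demand register to (re)start TX DMA. */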
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear on whether the controller
		 * honors received pause frames.  There is no separate
		 * control bit for RX pause frames, so just enable the
		 * MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

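	/*
	 * Reading MECISR presumably acknowledges the pending event
	 * counter interrupt.  Most counters below are packed two to
	 * a register, one 8-bit count in the high byte and another
	 * in the low byte, hence the shift/mask pairs; the reads
	 * also clear the counters.
	 */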
	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
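	/*
	 * Service events with interrupts disabled, re-reading the
	 * status after each pass; the loop is bounded to 8 passes so
	 * a babbling device cannot monopolize the CPU.
	 */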
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
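	/*
	 * Trim 4 bytes from the front instead of the usual 2-byte
	 * ETHER_ALIGN fixup; RX buffer addresses and lengths
	 * apparently must stay multiples of 4 (see the VTE_MRBSR
	 * note in vte_init()), and drlen below is shortened to match.
	 */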
	m_adj(m, sizeof(uint32_t));

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		/*
		 * No "too many segments" fixup is needed here: the
		 * spare map was created with a single segment in
		 * vte_dma_alloc(), so any load failure is final.
		 */
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does
		 * not keep track of the number of available RX
		 * descriptors, so the driver has to update VTE_MRDCR
		 * to tell the controller how many free RX
		 * descriptors were handed back.  This is similar to
		 * the mechanism used in VIA Velocity controllers and
		 * indicates the controller just polls the OWN bit of
		 * the current RX descriptor pointer.  A couple of
		 * severe issues were seen on a sample board where
		 * the controller continuously emitted TX pause
		 * frames once the RX pause threshold was crossed.
		 * Once triggered it never recovered from that state;
		 * I couldn't find a way to bring it back to working
		 * order.  This issue effectively disconnected the
		 * system from the network.  Also, the controller
		 * used 00:00:00:00:00:00 as the source station
		 * address of the TX pause frames.  Probably this is
		 * one of the reasons the vendor recommends not
		 * enabling flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor-recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first one as its own ethernet address and configures
	 * the remaining three as perfect multicast filter entries.
	 */
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support; otherwise received frames longer than the vte(4)
	 * MTU would be silently dropped by the controller.  That
	 * would break path-MTU discovery, as the sender would never
	 * get any response from the receiver.  The RX buffer size
	 * must be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure the TX/RX MACs.  The actual resolved duplex and
	 * flow control configuration is done after a valid link is
	 * detected.  Note, we don't generate early interrupts here
	 * either, since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge and clear all pending interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			/* Clear the stale pointer; vte_stop() would
			 * otherwise free this mbuf a second time. */
			sc->vte_cdata.vte_txmbufs[i] = NULL;
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
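	/*
	 * Chain the descriptors into a ring: each dtnp holds the
	 * physical address of the next descriptor, and the last one
	 * wraps back to the first so the controller can walk the
	 * ring without driver intervention.
	 */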
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
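	/* As in vte_init_tx_ring(), chain drnp into a circular list. */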
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}
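	/*
	 * Unused perfect filter slots are parked at the broadcast
	 * address; broadcast acceptance is presumably governed
	 * separately by MCR0_BROADCAST_DIS, so these placeholder
	 * entries stay inert.
	 */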

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				/* Advance before continue, or we
				 * would loop on this entry forever. */
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
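			/*
			 * Hash into the 64-bit multicast table: the
			 * top 2 bits of the big-endian CRC pick one
			 * of the four 16-bit MAR registers, and the
			 * next 4 bits pick a bit within it.
			 */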
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}