/*	$OpenBSD: if_vte.c,v 1.18 2016/04/13 10:34:32 mpi Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)
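/*
 * Note the double parentheses at DPRINTF call sites, e.g.
 * (hypothetical) DPRINTF(("%s: foo\n", sc->sc_dev.dv_xname));
 * the inner pair turns the whole printf argument list into a
 * single macro argument.
 */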

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
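		/*
		 * Checking the arithmetic against the formula above:
		 * 63 + 18 * 64 = 1215 MII clocks / 25MHz = 48.6us,
		 * and 63 + 1 * 64 = 127 MII clocks / 2.5MHz = 50.8us.
		 */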
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    nitems(vte_devices));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
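	/*
	 * Each 16-bit MIDx register packs two octets in little-endian
	 * order; e.g. (hypothetical value) MID0L == 0x3412 yields
	 * vte_eaddr[0] == 0x12 and vte_eaddr[1] == 0x34.
	 */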
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common for the BIOS to
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create DMA stuffs for TX ring */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->vte_cdata.vte_tx_ring, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA stuffs for RX ring */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->vte_cdata.vte_rx_ring, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->vte_cdata.vte_tx_ring, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->vte_cdata.vte_rx_ring, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;
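	/*
	 * For example (hypothetical numbers, assuming VTE_MIN_FRAMELEN
	 * is the usual 60 bytes, ETHER_MIN_LEN - ETHER_CRC_LEN): a
	 * 42-byte ARP request would get padlen = 18 here.
	 */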

	/*
	 * The controller does not support multi-fragmented TX buffers.
	 * The controller spends most of its TX processing time in
	 * de-fragmenting TX buffers.  Either a faster CPU or a more
	 * advanced controller DMA engine is required to speed up
	 * TX path processing.
	 * To mitigate the de-fragmenting issue, perform deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * that are composed of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
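	/*
	 * Each MECNT register packs two 8-bit event counters; the
	 * high-byte/low-byte splits below follow that layout (e.g.
	 * MECNT0 carries broadcast frames in its high byte and
	 * multicast frames in its low byte).
	 */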
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
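	/*
	 * Service events in a bounded loop: re-read VTE_MISR after
	 * each pass and give up after 8 passes, so a constant stream
	 * of events cannot keep us in interrupt context forever.
	 */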
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));
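	/*
	 * Note this trims 4 bytes from the front instead of the usual
	 * 2-byte ETHER_ALIGN, presumably to satisfy the controller's
	 * 4-byte RX buffer granularity (see the VTE_MRBSR comment in
	 * vte_init()).
	 */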

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is similar to the mechanism used in VIA Velocity
		 * controllers and indicates the controller just polls
		 * the OWN bit of the current RX descriptor pointer.
		 * A couple of severe issues were seen on the sample
		 * board, where the controller continuously emits TX
		 * pause frames once the RX pause threshold is crossed.
		 * Once triggered it never recovered from that state; at
		 * least, I couldn't find a way to bring it back to a
		 * working state.  This issue effectively disconnected
		 * the system from the network.  Also, the controller
		 * used 00:00:00:00:00:00 as the source station address
		 * of TX pause frames.  This is probably one of the
		 * reasons why the vendor recommends not enabling flow
		 * control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
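	/*
	 * Each ring base is a 32-bit physical address handed over as
	 * two 16-bit register writes (high word, then low word), so
	 * the descriptor rings have to sit below 4GB.
	 */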
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
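	/*
	 * For example, assuming VTE_RX_RING_CNT == 64, the threshold
	 * programmed here works out to (64 * 2) / 10 = 12 descriptors,
	 * i.e. roughly the 20% of the ring the comment above mentions.
	 */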

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames with a frame length
	 * longer than the vte(4) MTU would be silently dropped in
	 * the controller.  This would break path-MTU discovery, as
	 * the sender wouldn't get any responses from the receiver.
	 * The RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note, we also don't generate an early interrupt
	 * here, since we do not have the interrupt latency problems
	 * Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * the duplex configuration can be automatically reflected
	 * in MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			/* Don't leave a dangling pointer for vte_stop(). */
			sc->vte_cdata.vte_txmbufs[i] = NULL;
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
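			/*
			 * The top two CRC bits pick one of the four
			 * 16-bit MAR registers and the next four bits
			 * pick a bit within it, i.e. the upper 6 bits
			 * of the big-endian CRC index one of 64 hash
			 * bits.
			 */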
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}